hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---
a9c49f20c9920657e228dc88e87a6de8327b6327.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=============================================================================================================
/**
* @file rapmusic_cuda.cu
* @author Christoph Dinh <[email protected]>;
* @version 1.0
* @date March, 2011
*
* @section LICENSE
*
* Copyright (C) 2011 Christoph Dinh. All rights reserved.
*
* No part of this program may be photocopied, reproduced,
* or translated to another program language without the
* prior written consent of the author.
*
*
* @brief ToDo Documentation...
*
*/
//*************************************************************************************************************
//=============================================================================================================
// CUDA INCLUDES
//=============================================================================================================
#include "../include/rapmusic_cuda.cuh"
#include "../include/cudadevice.cuh"
#include "../include/handle_error.cuh"
#include "../include/rapmusic_kernel.cuh"
#include "../include/cuhpcvalue.cuh"
//*************************************************************************************************************
//=============================================================================================================
// CPP INCLUDES
//=============================================================================================================
#include "../../cpp/include/eigeninterface.h"
#include "../../cpp/include/model.h"
#include "../../cpp/include/rapdipoles.h"
//*************************************************************************************************************
//=============================================================================================================
// DEFINE NAMESPACE HPCLib
//=============================================================================================================
namespace HPCLib
{
//*************************************************************************************************************
//=============================================================================================================
// USED NAMESPACES
//=============================================================================================================
//*************************************************************************************************************
//=============================================================================================================
// DEFINE MEMBER METHODS
//=============================================================================================================
RapMusic_Cuda::RapMusic_Cuda()
: m_iPairCols(6)
, m_iMaxBlocksPerMultiProcessor(8) //CUDA C Programming Guide - Appendix F
{
}
//*************************************************************************************************************
RapMusic_Cuda::~RapMusic_Cuda()
{
m_host_pLeadFieldMat = NULL;
//garbage collecting
//######## CUDA START ########
// free the memory allocated on the GPU
/*HANDLE_ERROR( hipFree( m_dev_pLeadFieldMat ) );*/
delete m_dev_pLeadFieldMat;
delete m_dev_pVecPairIdxCombinations;
//######## CUDA END ########
}
//*************************************************************************************************************
//template <class T>
bool RapMusic_Cuda::initRAPMusic( HPCLib::CudaDevice* p_pDeviceInfo,
HPCLib::Model<float>* p_pModel,
bool p_bSparsed, int p_iN, double p_dThr)
{
return initRAPMusic(p_pDeviceInfo,
p_bSparsed ? p_pModel->getSparsedLeadFieldMat() : p_pModel->getLeadFieldMat(),
p_bSparsed ? p_pModel->getSparsedGridMat() : p_pModel->getGridMat(),
p_iN, p_dThr);
}
//*************************************************************************************************************
//template <class T>
bool RapMusic_Cuda::initRAPMusic( HPCLib::CudaDevice* p_pDeviceInfo,
HPCMatrix<float>* p_pMatLeadField,
HPCMatrix<float>* p_pMatGrid,
int p_iN, double p_dThr)
{
m_iMultiProcessorCount = p_pDeviceInfo->getSelectedDeviceProperties().multiProcessorCount;//14;
m_iWarpSize = p_pDeviceInfo->getSelectedDeviceProperties().warpSize;//32;
m_iMaxThreadsPerMultiProcessor = p_pDeviceInfo->getSelectedDeviceProperties().maxThreadsPerMultiProcessor;//1536;
m_iSharedMemoryPerMultiProcessor = p_pDeviceInfo->getSelectedDeviceProperties().sharedMemPerBlock;//48*1024;
cublasStatus status = hipblasInit ();
//Initialize RAP-MUSIC
std::cout << "##### Initialization CUDA RAP MUSIC started ######\n\n";
m_iN = p_iN;
m_dThreshold = p_dThr;
//Grid check
if(p_pMatGrid != NULL)
{
if ( p_pMatGrid->rows() != p_pMatLeadField->cols() / 3 )
{
std::cout << "Grid does not fit to given Lead Field!\n";
return false;
}
}
m_pMatGrid = p_pMatGrid;
//Lead Field check
if ( p_pMatLeadField->cols() % 3 != 0 )
{
std::cout << "Lead Field is not associated with a 3D grid!\n";
return false;
}
m_pMatLeadField = p_pMatLeadField;
m_dev_pLeadFieldMat = new cuHPCMatrix<float>(*p_pMatLeadField);//### CUDA ###
m_iNumGridPoints = (int)(m_dev_pLeadFieldMat->cols()/3);
m_iNumChannels = m_dev_pLeadFieldMat->rows();
//##### Calc lead field combination #####
std::cout << "Calculate lead field combinations. \n";
m_iNumLeadFieldCombinations = nchoose2(m_iNumGridPoints+1);
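//Note: nchoose2(n+1) = n*(n+1)/2, which presumably counts all unordered grid point pairs
//including the self-pairs (i,i); e.g. 3 grid points give 3*4/2 = 6 combinations.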
//######## CUDA START ########
// allocate device vector
m_dev_pVecPairIdxCombinations = new thrust::device_vector<int>(2 * m_iNumLeadFieldCombinations);
// obtain raw pointer to the device vector's memory -> for usage in kernel
m_dev_pPairIdxCombinations = thrust::raw_pointer_cast(&(*m_dev_pVecPairIdxCombinations)[0]);
hipLaunchKernelGGL(( cuCalcPairCombinations), dim3(128),dim3(1), 0, 0, m_iNumGridPoints, m_iNumLeadFieldCombinations, m_dev_pPairIdxCombinations);
//######## CUDA END ########
std::cout << "Lead Field combinations calculated. \n\n";
//##### Calc lead field combination end #####
std::cout << "Number of grid points: " << m_iNumGridPoints << "\n\n";
std::cout << "Number of combinated points: " << m_iNumLeadFieldCombinations << "\n\n";
std::cout << "Number of sources to find: " << m_iN << "\n\n";
std::cout << "Threshold: " << m_dThreshold << "\n\n";
//Init end
std::cout << "##### Initialization CUDA RAP MUSIC completed ######\n\n\n";
m_bIsInit = true;
return m_bIsInit;
}
//*************************************************************************************************************
bool RapMusic_Cuda::calcRapMusic(HPCMatrix<float>* p_pMatMeasurement, RapDipoles<float>*& p_pRapDipoles)
{
//if not initialized -> break
if(!m_bIsInit)
{
std::cout << "RAP-Music wasn't initialized!"; //ToDo: catch this earlier
return false;
}
//Test if data are correct
if(p_pMatMeasurement->rows() != m_iNumChannels)
{
std::cout << "Lead Field channels do not fit to number of measurement channels!"; //ToDo: catch this earlier
return false;
}
// //Inits
// //Stop the time for benchmark purpose
// clock_t start, end;
// start = clock();
//Calculate the signal subspace (t_dev_pMatPhi_s)
cuHPCMatrix<float>* t_dev_pMatPhi_s = NULL;//(m_iNumChannels, t_r < m_iN ? m_iN : t_r);
//separate kernel for calcPhi_s -> not possible because measurement is often too big for shared memory
int t_r = calcPhi_s(*p_pMatMeasurement, t_dev_pMatPhi_s);
int t_iMaxSearch = m_iN < t_r ? m_iN : t_r; //The smallest of Rank and Iterations
if (t_r < m_iN)
{
std::cout << "Warning: Rank " << t_r << " of the measurement data is smaller than the " << m_iN;
std::cout << " sources to find." << std::endl;
std::cout << " Searching now for " << t_iMaxSearch << " correlated sources.";
std::cout << std::endl << std::endl;
}
//Create Orthogonal Projector
//OrthProj
HPCMatrix<float> t_matOrthProj(m_iNumChannels,m_iNumChannels);
t_matOrthProj.setIdentity();
cuHPCMatrix<float>* t_dev_pMatOrthProj = new cuHPCMatrix<float>(t_matOrthProj);//### CUDA ###
//A_k_1
HPCMatrix<float> t_matA_k_1(m_iNumChannels,t_iMaxSearch);
t_matA_k_1.reset(0.0);//setZero();
if (m_pMatGrid != NULL)
{
if(p_pRapDipoles != NULL)
p_pRapDipoles->initRapDipoles(m_pMatGrid);
else
p_pRapDipoles = new RapDipoles<float>(m_pMatGrid);
}
else
{
if(p_pRapDipoles != NULL)
delete p_pRapDipoles;
p_pRapDipoles = new RapDipoles<float>();
}
std::cout << "##### Calculation of CUDA RAP MUSIC started ######\n\n";
cuHPCMatrix<float>* t_dev_pMatProj_Phi_s = new cuHPCMatrix<float>(t_matOrthProj.rows(), t_dev_pMatPhi_s->cols());//### CUDA ###
//new Version: Calculate projection before
HPCMatrix<float> t_matProj_LeadField(m_dev_pLeadFieldMat->rows(), m_dev_pLeadFieldMat->cols());
cuHPCMatrix<float>* t_dev_pMatProj_LeadField = new cuHPCMatrix<float>(m_dev_pLeadFieldMat->rows(), m_dev_pLeadFieldMat->cols());//### CUDA ###
for(int r = 0; r < t_iMaxSearch; ++r)
{
//ToDo: check whether using the device pointer is performant
//t_dev_pMatProj_Phi_s = t_dev_pMatOrthProj*t_dev_pMatPhi_s;
t_dev_pMatProj_Phi_s->cuHPCMatMult('N', 'N',*t_dev_pMatOrthProj,*t_dev_pMatPhi_s);//### CUDA ###
//new Version: Calculating Projection before -> ToDo remove this later on
t_matProj_LeadField = t_matOrthProj * (*m_pMatLeadField);//Subtract the found sources from the current found source
//t_dev_pMatProj_LeadField = t_dev_pMatOrthProj*m_dev_pLeadFieldMat;
t_dev_pMatProj_LeadField->cuHPCMatMult('N', 'N',*t_dev_pMatOrthProj,*m_dev_pLeadFieldMat);//### CUDA ###
//###First Option###
//Step 1 (according to Mosher 1998): tmp_Proj_Phi_S may already be orthogonal -> then no SVD is needed -> U_B = tmp_Proj_Phi_S;
cuHPCMatrix<float>* t_dev_pMatU_B = new cuHPCMatrix<float>(t_dev_pMatPhi_s->rows(), t_dev_pMatPhi_s->cols());//### CUDA ###
cuHPCValue<int> t_dev_iRank(0);//### CUDA ###
int t_iTh_y = 8;//16; //ToDo: More than 8 threads - wrong results
int t_iTh_z = 2;//1;
int t_iMatSize_U_B = t_dev_pMatPhi_s->rows() * t_dev_pMatPhi_s->cols();
int t_iWMatSize_U_B = t_dev_pMatPhi_s->cols();
int t_iCacheYZSize_U_B = t_iTh_y*t_iTh_z;
int t_iSVDCache_U_B = t_dev_pMatPhi_s->cols()+1+1;//rv1[m_iPairCols]; scale; s
size_t t_iSharedMem_U_B = sizeof(float) * (t_iMatSize_U_B + t_iWMatSize_U_B + t_iCacheYZSize_U_B + t_iSVDCache_U_B);
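//Shared memory budget for cuCalcU_B (in floats): a working copy of Proj_Phi_s, its singular
//value vector, a threadIdx.y*threadIdx.z reduction cache and the SVD scratch values
//(rv1, scale, s) - a rough sketch of what the kernel is assumed to keep on chip.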
dim3 blocks = dim3( 1, 1);
dim3 threads = dim3( 1, t_iTh_y, t_iTh_z);
hipLaunchKernelGGL(( cuCalcU_B), dim3(blocks), dim3(threads), t_iSharedMem_U_B , 0,
t_dev_pMatProj_Phi_s->data(),
t_dev_pMatProj_Phi_s->rows(),
t_dev_pMatProj_Phi_s->cols(),
t_dev_pMatU_B->data(),
t_dev_iRank.data());
HANDLE_ERROR( hipDeviceSynchronize() ); //to ensure that the kernel has completed
int t_iFullRank_U_B = t_dev_iRank.toHostValue();
HPCMatrix<float> t_matU_B(t_dev_pMatProj_Phi_s->rows(), t_iFullRank_U_B);
// copy the array back from the GPU to the CPU
HANDLE_ERROR( hipMemcpy( t_matU_B.data(), t_dev_pMatU_B->data(),
t_iFullRank_U_B * t_dev_pMatProj_Phi_s->rows() * sizeof(float),
hipMemcpyDeviceToHost ) );
//ToDo - better to resize - drop no longer needed columns
delete t_dev_pMatU_B;
t_dev_pMatU_B = new cuHPCMatrix<float>(t_matU_B);//### CUDA ###
//######## CUDA START ########
// allocate device vector
thrust::device_vector<float> t_dev_vecRoh(m_iNumLeadFieldCombinations);
// obtain raw pointer to the device vector's memory -> for usage in kernel
float * t_dev_pRoh = thrust::raw_pointer_cast(&t_dev_vecRoh[0]);
//######## CUDA END ########
// subcorr GPU
//######## CUDA START ########
hipEvent_t start, stop;
float elapsedTime;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
t_iTh_y = 8;//16; //ToDo: More than 8 threads - wrong results
t_iTh_z = 2;//1;
int t_iPairMatSize = m_dev_pLeadFieldMat->rows() * m_iPairCols;
int t_iWMatSize = m_iPairCols;
int t_iCorMatSize = m_iPairCols*t_iFullRank_U_B;
int t_iCacheYZSize = t_iTh_y*t_iTh_z;
int t_iSVDCache = m_iPairCols+1+1;//rv1[m_iPairCols]; scale; s
size_t t_iSharedMemPerPairMat = sizeof(float) * (t_iPairMatSize + t_iWMatSize + t_iCorMatSize + t_iCacheYZSize + t_iSVDCache);
int t_iPairMatsPerMultiProcessor = m_iSharedMemoryPerMultiProcessor/t_iSharedMemPerPairMat;
std::cout << "Shared Memory Usage: " << t_iSharedMemPerPairMat << " Byte x " << t_iPairMatsPerMultiProcessor << std::endl;
int t_iPairMatsPerBlock = ceil((float)(t_iPairMatsPerMultiProcessor)/(float)m_iMaxBlocksPerMultiProcessor);//=threadDim.x
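//Occupancy heuristic: the pair matrices that fit into one multiprocessor's shared memory
//are spread over the assumed maximum of 8 resident blocks; threadIdx.x then indexes the
//pair matrices within a block.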
/*DIRTY HACK*/ t_iPairMatsPerBlock = 2;//t_iPairMatsPerBlock > 2 ? 2 : t_iPairMatsPerBlock;//ToDo Debug when 3 Mats per Block we get the wrong result
std::cout << "Pair Mats per Block: " << t_iPairMatsPerBlock << std::endl;
size_t t_iSharedMemPerBlock = t_iSharedMemPerPairMat * t_iPairMatsPerBlock;
blocks = dim3( /*7381*/ 64*m_iMultiProcessorCount, 1);
threads = dim3( t_iPairMatsPerBlock, t_iTh_y, t_iTh_z);
HANDLE_ERROR( hipEventRecord( start, 0 ) );
hipLaunchKernelGGL(( RapMusicSubcorr), dim3(blocks), dim3(threads), t_iSharedMemPerBlock, 0,
t_dev_pMatProj_LeadField->data(),
t_dev_pMatProj_LeadField->rows(),
t_dev_pMatProj_LeadField->cols(),
m_dev_pPairIdxCombinations,
m_iNumLeadFieldCombinations,
t_dev_pMatU_B->data(),
t_iFullRank_U_B,
t_dev_pRoh );
HANDLE_ERROR( hipDeviceSynchronize() ); //to ensure that the kernel has completed
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,
start, stop ) );
// free events
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
std::cout << "Time Elapsed: " << elapsedTime << " ms" << std::endl;
//######## CUDA END ########
//Find the maximum of correlation
//######## CUDA THRUST START ########
//max_element returns an iterator, so to convert that into a position we subtract the iterator at the beginning of the vector.
int t_iMaxIdx = thrust::max_element(t_dev_vecRoh.begin(), t_dev_vecRoh.end()) - t_dev_vecRoh.begin();
float t_val_roh_k = t_dev_vecRoh[t_iMaxIdx];
//######## THRUST CUDA END ########
//get positions in sparsed leadfield from index combinations;
int t_iIdx1 = (*m_dev_pVecPairIdxCombinations)[2*t_iMaxIdx];
int t_iIdx2 = (*m_dev_pVecPairIdxCombinations)[2*t_iMaxIdx+1];
// (Idx+1) because of MATLAB positions -> starting with 1 not with 0
std::cout << "Iteration: " << r+1 << " of " << t_iMaxSearch
<< "; Correlation: " << t_val_roh_k<< "; Position (Idx+1): " << t_iIdx1+1 << " - " << t_iIdx2+1 <<"\n\n";
//Calculations with the max correlated dipole pair G_k_1
HPCMatrix<float> t_matG_k_1(t_matProj_LeadField.rows(),6);
getLeadFieldPair(*m_pMatLeadField, t_matG_k_1, t_iIdx1, t_iIdx2);
HPCMatrix<float> t_matProj_G_k_1(t_matOrthProj.rows(), t_matG_k_1.cols());
t_matProj_G_k_1 = t_matOrthProj * t_matG_k_1;//Subtract the found sources from the current found source
//Calculate source direction
//source direction (p_pMatPhi) for current source r (phi_k_1)
HPCMatrix<float> t_vec_phi_k_1(6, 1);
subcorr(t_matProj_G_k_1, t_matU_B, t_vec_phi_k_1);//Correlate the current source to calculate the direction
//Set return values
p_pRapDipoles->insertSource(t_iIdx1, t_iIdx2, t_vec_phi_k_1.data(), t_val_roh_k);
//Stop searching when the correlation is smaller than the threshold
if (t_val_roh_k < m_dThreshold)
{
std::cout << "Searching stopped, last correlation " << t_val_roh_k;
std::cout << " is smaller then the given threshold " << m_dThreshold << std::endl << std::endl;
break;
}
//Calculate A_k_1 = [a_theta_1..a_theta_k_1] matrix for subtraction of found source
calcA_k_1(t_matG_k_1, t_vec_phi_k_1, r, t_matA_k_1);
//Calculate new orthogonal Projector (Pi_k_1)
calcOrthProj(t_matA_k_1, t_matOrthProj);
//#### CUDA START ####
HANDLE_ERROR( hipMemcpy( t_dev_pMatOrthProj->data(),
t_matOrthProj.data(),
sizeof(float) * t_matOrthProj.size(),
hipMemcpyHostToDevice ) );
//#### CUDA END ####
//garbage collecting
// free the memory allocated on the GPU
delete t_dev_pMatU_B;
// free the memory we allocated on the CPU
}
//garbage collecting
// free the memory allocated on the GPU
delete t_dev_pMatProj_LeadField;
delete t_dev_pMatProj_Phi_s;
delete t_dev_pMatOrthProj;
delete t_dev_pMatPhi_s;
// free the memory we allocated on the CPU
std::cout << "##### Calculation of CUDA RAP MUSIC completed ######\n\n";
// end = clock();
//
// float t_fElapsedTime = ( (float)(end-start) / (float)CLOCKS_PER_SEC ) * 1000.0f;
// std::cout << "Time Elapsed: " << t_fElapsedTime << " ms" << std::endl << std::endl;
//garbage collecting
//ToDo
return true;
}
//*************************************************************************************************************
bool RapMusic_Cuda::calcPowellRAPMusic(HPCMatrix<float>* p_pMatMeasurement, RapDipoles<float>*& p_pRapDipoles)
{
//if not initialized -> break
if(!m_bIsInit)
{
std::cout << "RAP-Music wasn't initialized!"; //ToDo: catch this earlier
return false;
}
//Test if data are correct
if(p_pMatMeasurement->rows() != m_iNumChannels)
{
std::cout << "Lead Field channels do not fit to number of measurement channels!"; //ToDo: catch this earlier
return false;
}
// //Inits
// //Stop the time for benchmark purpose
// clock_t start, end;
// start = clock();
//Calculate the signal subspace (t_dev_pMatPhi_s)
cuHPCMatrix<float>* t_dev_pMatPhi_s = NULL;//(m_iNumChannels, t_r < m_iN ? m_iN : t_r);
//separate kernel for calcPhi_s -> not possible because measurement is often too big for shared memory
int t_r = calcPhi_s(*p_pMatMeasurement, t_dev_pMatPhi_s);
int t_iMaxSearch = m_iN < t_r ? m_iN : t_r; //The smallest of Rank and Iterations
if (t_r < m_iN)
{
std::cout << "Warning: Rank " << t_r << " of the measurement data is smaller than the " << m_iN;
std::cout << " sources to find." << std::endl;
std::cout << " Searching now for " << t_iMaxSearch << " correlated sources.";
std::cout << std::endl << std::endl;
}
//Create Orthogonal Projector
//OrthProj
HPCMatrix<float> t_matOrthProj(m_iNumChannels,m_iNumChannels);
t_matOrthProj.setIdentity();
cuHPCMatrix<float>* t_dev_pMatOrthProj = new cuHPCMatrix<float>(t_matOrthProj);//### CUDA ###
//A_k_1
HPCMatrix<float> t_matA_k_1(m_iNumChannels,t_iMaxSearch);
t_matA_k_1.reset(0.0);//setZero();
if (m_pMatGrid != NULL)
{
if(p_pRapDipoles != NULL)
p_pRapDipoles->initRapDipoles(m_pMatGrid);
else
p_pRapDipoles = new RapDipoles<float>(m_pMatGrid);
}
else
{
if(p_pRapDipoles != NULL)
delete p_pRapDipoles;
p_pRapDipoles = new RapDipoles<float>();
}
std::cout << "##### Calculation of CUDA RAP MUSIC started ######\n\n";
cuHPCMatrix<float>* t_dev_pMatProj_Phi_s = new cuHPCMatrix<float>(t_matOrthProj.rows(), t_dev_pMatPhi_s->cols());//### CUDA ###
//new Version: Calculate projection before
HPCMatrix<float> t_matProj_LeadField(m_dev_pLeadFieldMat->rows(), m_dev_pLeadFieldMat->cols());
cuHPCMatrix<float>* t_dev_pMatProj_LeadField = new cuHPCMatrix<float>(m_dev_pLeadFieldMat->rows(), m_dev_pLeadFieldMat->cols());//### CUDA ###
for(int r = 0; r < t_iMaxSearch; ++r)
{
//ToDo: check whether using the device pointer is performant
//t_dev_pMatProj_Phi_s = t_dev_pMatOrthProj*t_dev_pMatPhi_s;
t_dev_pMatProj_Phi_s->cuHPCMatMult('N', 'N',*t_dev_pMatOrthProj,*t_dev_pMatPhi_s);//### CUDA ###
//new Version: Calculating Projection before -> ToDo remove this later on
t_matProj_LeadField = t_matOrthProj * (*m_pMatLeadField);//Subtract the found sources from the current found source
//t_dev_pMatProj_LeadField = t_dev_pMatOrthProj*m_dev_pLeadFieldMat;
t_dev_pMatProj_LeadField->cuHPCMatMult('N', 'N',*t_dev_pMatOrthProj,*m_dev_pLeadFieldMat);//### CUDA ###
//###First Option###
//Step 1 (according to Mosher 1998): tmp_Proj_Phi_S may already be orthogonal -> then no SVD is needed -> U_B = tmp_Proj_Phi_S;
cuHPCMatrix<float>* t_dev_pMatU_B = new cuHPCMatrix<float>(t_dev_pMatPhi_s->rows(), t_dev_pMatPhi_s->cols());//### CUDA ###
cuHPCValue<int> t_dev_iRank(0);//### CUDA ###
int t_iTh_y = 8;//16; //ToDo: More than 8 threads - wrong results
int t_iTh_z = 2;//1;
int t_iMatSize_U_B = t_dev_pMatPhi_s->rows() * t_dev_pMatPhi_s->cols();
int t_iWMatSize_U_B = t_dev_pMatPhi_s->cols();
int t_iCacheYZSize_U_B = t_iTh_y*t_iTh_z;
int t_iSVDCache_U_B = t_dev_pMatPhi_s->cols()+1+1;//rv1[m_iPairCols]; scale; s
size_t t_iSharedMem_U_B = sizeof(float) * (t_iMatSize_U_B + t_iWMatSize_U_B + t_iCacheYZSize_U_B + t_iSVDCache_U_B);
dim3 blocks = dim3( 1, 1);
dim3 threads = dim3( 1, t_iTh_y, t_iTh_z);
hipLaunchKernelGGL(( cuCalcU_B), dim3(blocks), dim3(threads), t_iSharedMem_U_B , 0,
t_dev_pMatProj_Phi_s->data(),
t_dev_pMatProj_Phi_s->rows(),
t_dev_pMatProj_Phi_s->cols(),
t_dev_pMatU_B->data(),
t_dev_iRank.data());
HANDLE_ERROR( hipDeviceSynchronize() ); //to ensure that the kernel has completed
int t_iFullRank_U_B = t_dev_iRank.toHostValue();
HPCMatrix<float> t_matU_B(t_dev_pMatProj_Phi_s->rows(), t_iFullRank_U_B);
// copy the array back from the GPU to the CPU
HANDLE_ERROR( hipMemcpy( t_matU_B.data(), t_dev_pMatU_B->data(),
t_iFullRank_U_B * t_dev_pMatProj_Phi_s->rows() * sizeof(float),
hipMemcpyDeviceToHost ) );
//ToDo - better to resize - drop no longer needed columns
delete t_dev_pMatU_B;
t_dev_pMatU_B = new cuHPCMatrix<float>(t_matU_B);//### CUDA ###
//######## CUDA START ########
// allocate device vector
thrust::device_vector<float> t_dev_vecRoh(m_iNumLeadFieldCombinations);
// obtain raw pointer to the device vector's memory -> for usage in kernel
float * t_dev_pRoh = thrust::raw_pointer_cast(&t_dev_vecRoh[0]);
// allocate device vector
thrust::device_vector<int> t_dev_vecRowIndezes(m_iNumGridPoints);
// obtain raw pointer to the device vector's memory -> for usage in kernel
int * t_dev_pRowIndezes = thrust::raw_pointer_cast(&t_dev_vecRowIndezes[0]);
//######## CUDA END ########
// subcorr GPU
//Powell
int t_iCurrentRow = 2;
int t_iIdx1 = -1;
int t_iIdx2 = -1;
int t_iMaxIdx_old = -1;
int t_iMaxIdx = -1;
int t_iMaxFound = 0;
float t_val_roh_k = 0;
int t_iNumVecElements = m_iNumGridPoints;
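//Powell-style search (sketch of the idea): instead of scoring all pair combinations, each
//sweep only scores the pairs that contain t_iCurrentRow; the partner index of the best pair
//becomes the next current row, and the search stops once the best pair index no longer changes.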
while(t_iMaxFound == 0)
{
//######## CUDA START ########
hipEvent_t start, stop;
float elapsedTime;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
//Powell indices
hipLaunchKernelGGL(( cuPowellIdxVec), dim3(32), dim3(32), 0, 0, t_iCurrentRow,
t_iNumVecElements,
t_dev_pRowIndezes );
HANDLE_ERROR( hipDeviceSynchronize() ); //to ensure that the kernel has completed
// //DEBUG
// thrust::host_vector<int> h_vec(m_iNumGridPoints);
// // transfer data back to host
// thrust::copy(t_dev_vecRowIndezes.begin(), t_dev_vecRowIndezes.end(), h_vec.begin());
// std::cout << "indezes" << std::endl;
// for(int i = 0; i < 10; ++i)
// std::cout << h_vec[i] << std::endl;
// //DEBUG
t_iTh_y = 8;//16; //ToDo: More than 8 threads - wrong results
t_iTh_z = 2;//1;
int t_iPairMatSize = m_dev_pLeadFieldMat->rows() * m_iPairCols;
int t_iWMatSize = m_iPairCols;
int t_iCorMatSize = m_iPairCols*t_iFullRank_U_B;
int t_iCacheYZSize = t_iTh_y*t_iTh_z;
int t_iSVDCache = m_iPairCols+1+1;//rv1[m_iPairCols]; scale; s
size_t t_iSharedMemPerPairMat = sizeof(float) * (t_iPairMatSize + t_iWMatSize + t_iCorMatSize + t_iCacheYZSize + t_iSVDCache);
int t_iPairMatsPerMultiProcessor = m_iSharedMemoryPerMultiProcessor/t_iSharedMemPerPairMat;
std::cout << "Shared Memory Usage: " << t_iSharedMemPerPairMat << " Byte x " << t_iPairMatsPerMultiProcessor << std::endl;
int t_iPairMatsPerBlock = ceil((float)(t_iPairMatsPerMultiProcessor)/(float)m_iMaxBlocksPerMultiProcessor);//=threadDim.x
/*DIRTY HACK*/ t_iPairMatsPerBlock = 2;//t_iPairMatsPerBlock > 2 ? 2 : t_iPairMatsPerBlock;//ToDo Debug when 3 Mats per Block we get the wrong result
std::cout << "Pair Mats per Block: " << t_iPairMatsPerBlock << std::endl;
size_t t_iSharedMemPerBlock = t_iSharedMemPerPairMat * t_iPairMatsPerBlock;
blocks = dim3( /*7381*/ 64*m_iMultiProcessorCount, 1);
threads = dim3( t_iPairMatsPerBlock, t_iTh_y, t_iTh_z);
HANDLE_ERROR( hipEventRecord( start, 0 ) );
hipLaunchKernelGGL(( PowellRapMusicSubcorr), dim3(blocks), dim3(threads), t_iSharedMemPerBlock, 0,
t_dev_pMatProj_LeadField->data(),
t_dev_pMatProj_LeadField->rows(),
t_dev_pMatProj_LeadField->cols(),
m_dev_pPairIdxCombinations,
t_dev_pRowIndezes,
t_iNumVecElements,
t_dev_pMatU_B->data(),
t_iFullRank_U_B,
t_dev_pRoh );
HANDLE_ERROR( hipDeviceSynchronize() ); //to ensure that the kernel has completed
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,
start, stop ) );
// free events
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
std::cout << "Time Elapsed: " << elapsedTime << " ms" << std::endl;
//######## CUDA END ########
//Find the maximum of correlation
//######## CUDA THRUST START ########
//max_element returns an iterator, so to convert that into a position we subtract the iterator at the beginning of the vector.
t_iMaxIdx = thrust::max_element(t_dev_vecRoh.begin(), t_dev_vecRoh.end()) - t_dev_vecRoh.begin();
t_val_roh_k = t_dev_vecRoh[t_iMaxIdx];
//######## THRUST CUDA END ########
//Powell
if(t_iMaxIdx == t_iMaxIdx_old)
{
t_iMaxFound = 1;
break;
}
else
{
t_iMaxIdx_old = t_iMaxIdx;
//get positions in sparsed leadfield from index combinations;
t_iIdx1 = (*m_dev_pVecPairIdxCombinations)[2*t_iMaxIdx];
t_iIdx2 = (*m_dev_pVecPairIdxCombinations)[2*t_iMaxIdx+1];
}
//set new index
if(t_iIdx1 == t_iCurrentRow)
t_iCurrentRow = t_iIdx2;
else
t_iCurrentRow = t_iIdx1;
}
// (Idx+1) because of MATLAB positions -> starting with 1 not with 0
std::cout << "Iteration: " << r+1 << " of " << t_iMaxSearch
<< "; Correlation: " << t_val_roh_k<< "; Position (Idx+1): " << t_iIdx1+1 << " - " << t_iIdx2+1 <<"\n\n";
//Calculations with the max correlated dipole pair G_k_1
HPCMatrix<float> t_matG_k_1(t_matProj_LeadField.rows(),6);
getLeadFieldPair(*m_pMatLeadField, t_matG_k_1, t_iIdx1, t_iIdx2);
HPCMatrix<float> t_matProj_G_k_1(t_matOrthProj.rows(), t_matG_k_1.cols());
t_matProj_G_k_1 = t_matOrthProj * t_matG_k_1;//Subtract the found sources from the current found source
//Calculate source direction
//source direction (p_pMatPhi) for current source r (phi_k_1)
HPCMatrix<float> t_vec_phi_k_1(6, 1);
subcorr(t_matProj_G_k_1, t_matU_B, t_vec_phi_k_1);//Correlate the current source to calculate the direction
//Set return values
p_pRapDipoles->insertSource(t_iIdx1, t_iIdx2, t_vec_phi_k_1.data(), t_val_roh_k);
//Stop searching when the correlation is smaller than the threshold
if (t_val_roh_k < m_dThreshold)
{
std::cout << "Searching stopped, last correlation " << t_val_roh_k;
std::cout << " is smaller then the given threshold " << m_dThreshold << std::endl << std::endl;
break;
}
//Calculate A_k_1 = [a_theta_1..a_theta_k_1] matrix for subtraction of found source
calcA_k_1(t_matG_k_1, t_vec_phi_k_1, r, t_matA_k_1);
//Calculate new orthogonal Projector (Pi_k_1)
calcOrthProj(t_matA_k_1, t_matOrthProj);
//#### CUDA START ####
HANDLE_ERROR( hipMemcpy( t_dev_pMatOrthProj->data(),
t_matOrthProj.data(),
sizeof(float) * t_matOrthProj.size(),
hipMemcpyHostToDevice ) );
//#### CUDA END ####
//garbage collecting
// free the memory allocated on the GPU
delete t_dev_pMatU_B;
// free the memory we allocated on the CPU
}
//garbage collecting
// free the memory allocated on the GPU
delete t_dev_pMatProj_LeadField;
delete t_dev_pMatProj_Phi_s;
delete t_dev_pMatOrthProj;
delete t_dev_pMatPhi_s;
// free the memory we allocated on the CPU
std::cout << "##### Calculation of CUDA RAP MUSIC completed ######\n\n";
// end = clock();
//
// float t_fElapsedTime = ( (float)(end-start) / (float)CLOCKS_PER_SEC ) * 1000.0f;
// std::cout << "Time Elapsed: " << t_fElapsedTime << " ms" << std::endl << std::endl;
//garbage collecting
//ToDo
return true;
}
//*************************************************************************************************************
int RapMusic_Cuda::nchoose2(int n)
{
//nchoosek(n, k) with k = 2, equals n*(n-1)*0.5
int t_iNumOfCombination = (int)(n*(n-1)*0.5);
return t_iNumOfCombination;
}
//*************************************************************************************************************
//template <class T>
int RapMusic_Cuda/*<T>*/::calcPhi_s(const HPCMatrix<float>& p_pMatMeasurement, cuHPCMatrix<float>* &p_dev_pMatPhi_s)
{
//Calculate p_dev_pMatPhi_s
HPCMatrix<float> t_matF;
if (p_pMatMeasurement.cols() > p_pMatMeasurement.rows())
{
t_matF = makeSquareMat(p_pMatMeasurement); //FF^T
}
else
{
t_matF = p_pMatMeasurement;
}
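//If there are more samples than channels, the SVD is taken of F*F^T (channels x channels)
//instead of F; its left singular vectors span the same signal subspace as those of F.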
SVD phi_sSVD(t_matF, 1);
int t_r = getRank(phi_sSVD.singularValues());
int t_iCols = t_r; //t_r < m_iN ? m_iN : t_r;
if (p_dev_pMatPhi_s != NULL)
delete p_dev_pMatPhi_s;
//m_iNumChannels has to be equal to phi_sSVD.matrixU()->rows()
p_dev_pMatPhi_s = new cuHPCMatrix<float>(m_iNumChannels, t_iCols);
//assign the signal subspace
// copy the array from the CPU to the GPU
HANDLE_ERROR( hipMemcpy( p_dev_pMatPhi_s->data(), phi_sSVD.matrixU()->data(),
sizeof(float) * m_iNumChannels *t_iCols,
hipMemcpyHostToDevice ) );
//ToDO Use jojos svd instead of cula
//garbage collecting
return t_r;
}
//*************************************************************************************************************
//Direction Subcorr
float RapMusic_Cuda::subcorr(HPCMatrix<float>& p_matProj_G, HPCMatrix<float>& p_matU_B, HPCMatrix<float>& p_vec_phi_k_1)
{
//Orthogonalization check omitted for performance reasons -> it is much faster without it
SVD t_svdProj_G(p_matProj_G, 3);
HPCMatrix<float> U_A_T(6, t_svdProj_G.matrixU()->rows());
U_A_T = t_svdProj_G.matrixU()->transpose();
HPCMatrix<float>* sigma_A = t_svdProj_G.singularValues();
HPCMatrix<float>* V_A = t_svdProj_G.matrixV();
//According to Mosher 1998 - ToDo: only retain those components of U_A and U_B that correspond to nonzero singular values
//for U_A and U_B the number of columns corresponds to their ranks
//-> reduce to rank only when directions aren't calculated, otherwise use the full U_A_T
HPCMatrix<float> Cor(6, p_matU_B.cols());
//Step 2: compute the subspace correlation
Cor = U_A_T*p_matU_B;//according to Mosher 1998: C = U_A^T * U_B
HPCMatrix<float>* t_vecSigma_C;
//Step 4
HPCMatrix<float>* U_C;
if (Cor.cols() > Cor.rows())
{
Cor = Cor.transpose();//adjoint(); //for complex matrices it has to be the adjoint
SVD svdOfCor_H(Cor, 2);
U_C = new HPCMatrix<float>(svdOfCor_H.matrixV()->rows(), svdOfCor_H.matrixV()->cols());
//because Cor was transposed, U and V of its SVD are exchanged
memcpy(U_C->data(),svdOfCor_H.matrixV()->data(),(U_C->size()*sizeof(float)));
t_vecSigma_C = new HPCMatrix<float>(svdOfCor_H.singularValues()->rows(), svdOfCor_H.singularValues()->cols());
memcpy(t_vecSigma_C->data(),svdOfCor_H.singularValues()->data(),(t_vecSigma_C->size()*sizeof(float)));
}
else
{
SVD svdOfCor(Cor, 1);
U_C = new HPCMatrix<float>(svdOfCor.matrixU()->rows(), svdOfCor.matrixU()->cols());
memcpy(U_C->data(),svdOfCor.matrixU()->data(),(U_C->size()*sizeof(float)));
t_vecSigma_C = new HPCMatrix<float>(svdOfCor.singularValues()->rows(), svdOfCor.singularValues()->cols());
memcpy(t_vecSigma_C->data(),svdOfCor.singularValues()->data(),(t_vecSigma_C->size()*sizeof(float)));
}
//invert sigma A
HPCMatrix<float> sigma_a_inv(sigma_A->rows(), sigma_A->rows());
for (int i = 0; i < sigma_A->rows(); ++i)
{
sigma_a_inv(i,i) = 1/sigma_A->data()[i];
}
HPCMatrix<float> X(6,U_C->cols());
X = ((*V_A)*sigma_a_inv)*(*U_C);//X = V_A*Sigma_A^-1*U_C
float norm_X = 0;
for(int i = 0; i < 6; ++i)
norm_X += pow(X.data()[i], 2);
norm_X = 1/sqrt(norm_X);
//Multiply a scalar with an Array -> linear transform
for(int i = 0; i < 6; ++i)
p_vec_phi_k_1.data()[i] = X.data()[i]*norm_X;//u1 = x1/||x1|| this is the orientation
//Step 3
float ret_sigma_C;
ret_sigma_C = t_vecSigma_C->data()[0]; //Take only the correlation of the first principal components
//garbage collecting
delete U_C;
delete t_vecSigma_C;
return ret_sigma_C;
}
//*************************************************************************************************************
void RapMusic_Cuda::calcA_k_1( const HPCMatrix<float>& p_matG_k_1,
const HPCMatrix<float>& p_matPhi_k_1,
const int p_iIdxk_1,
HPCMatrix<float>& p_matA_k_1)
{
//Calculate A_k_1 = [a_theta_1..a_theta_k_1] matrix for subtraction of found source
HPCMatrix<float> t_vec_a_theta_k_1(p_matG_k_1.rows(),1);
t_vec_a_theta_k_1 = p_matG_k_1*p_matPhi_k_1; // a_theta_k_1 = G_k_1*phi_k_1 this corresponds to the normalized signal component in subspace r
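//column-major layout assumed: column p_iIdxk_1 of A_k_1 starts at data() + p_iIdxk_1*rows()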
memcpy( p_matA_k_1.data()+p_iIdxk_1*p_matA_k_1.rows(),
t_vec_a_theta_k_1.data(),
(p_matA_k_1.rows()*sizeof(float)));
}
//*************************************************************************************************************
void RapMusic_Cuda::calcOrthProj(const HPCMatrix<float>& p_matA_k_1, HPCMatrix<float>& p_matOrthProj)
{
//Calculate OrthProj=I-A_k_1*(A_k_1'*A_k_1)^-1*A_k_1' //Wetterling -> A_k_1 = Gain
HPCMatrix<float> t_matA_k_1_tmp(p_matA_k_1.cols(), p_matA_k_1.cols());
t_matA_k_1_tmp = p_matA_k_1.transpose()/*adjoint()*/*p_matA_k_1;//A_k_1'*A_k_1 = A_k_1_tmp -> A_k_1' has to be adjoint for complex
int t_size = t_matA_k_1_tmp.cols();
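//A_k_1 is pre-allocated with t_iMaxSearch columns but only the first k are filled so far;
//the trailing zero diagonal entries of A_k_1'*A_k_1 mark the unused columns, which are
//trimmed here before the inversion.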
while (!t_matA_k_1_tmp(t_size-1,t_size-1))
{
--t_size;
}
HPCMatrix<float> t_matA_k_1_tmp_inv(t_matA_k_1_tmp.rows(), t_matA_k_1_tmp.cols());
HPCMatrix<float> t_matA_k_1_tmpsubmat = t_matA_k_1_tmp.get(0,0,t_size,t_size);
LU t_matA_k_1_LU(&t_matA_k_1_tmpsubmat);
for(int i = 0; i < t_matA_k_1_tmpsubmat.rows(); ++i)
for(int j = 0; j < t_matA_k_1_tmpsubmat.cols(); ++j)
t_matA_k_1_tmp_inv(i,j) = t_matA_k_1_LU.invert()(i,j);//(A_k_1_tmp)^-1 = A_k_1_tmp_inv
t_matA_k_1_tmp.resize(p_matA_k_1.rows(), p_matA_k_1.cols());
t_matA_k_1_tmp = p_matA_k_1*t_matA_k_1_tmp_inv;//(A_k_1*A_k_1_tmp_inv) = A_k_1_tmp
HPCMatrix<float> t_matA_k_1_tmp2(p_matA_k_1.rows(), p_matA_k_1.rows());
t_matA_k_1_tmp2 = t_matA_k_1_tmp*p_matA_k_1.transpose();//adjoint();//(A_k_1_tmp)*A_k_1' -> here A_k_1' is only transposed - it has to be adjoint
HPCMatrix<float> I(m_iNumChannels,m_iNumChannels);
I.diag(1.0);//setIdentity();
p_matOrthProj = I-t_matA_k_1_tmp2; //OrthProj=I-A_k_1*(A_k_1'*A_k_1)^-1*A_k_1';
// //garbage collecting
// //ToDo
}
//*************************************************************************************************************
//ToDo don't make a real copy
void RapMusic_Cuda::getLeadFieldPair( HPCMatrix<float>& p_matLeadField,
HPCMatrix<float>& p_matLeadField_Pair,
int p_iIdx1, int p_iIdx2)
{
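//Copies the two 3-column lead field blocks of grid points p_iIdx1 and p_iIdx2 into the
//6-column pair matrix; column-major storage is assumed, so column c starts at
//data() + c*p_matLeadField.rows().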
memcpy( p_matLeadField_Pair.data(),
p_matLeadField.data()+p_iIdx1*3*p_matLeadField.rows(),
(p_matLeadField.rows()*3*sizeof(float)));
memcpy( p_matLeadField_Pair.data()+3*p_matLeadField.rows(),
p_matLeadField.data()+p_iIdx2*3*p_matLeadField.rows(),
(p_matLeadField.rows()*3*sizeof(float)));
}
}//Namespace
|
a9c49f20c9920657e228dc88e87a6de8327b6327.cu
|
//=============================================================================================================
/**
* @file rapmusic_cuda.cu
* @author Christoph Dinh <[email protected]>;
* @version 1.0
* @date March, 2011
*
* @section LICENSE
*
* Copyright (C) 2011 Christoph Dinh. All rights reserved.
*
* No part of this program may be photocopied, reproduced,
* or translated to another program language without the
* prior written consent of the author.
*
*
* @brief ToDo Documentation...
*
*/
//*************************************************************************************************************
//=============================================================================================================
// CUDA INCLUDES
//=============================================================================================================
#include "../include/rapmusic_cuda.cuh"
#include "../include/cudadevice.cuh"
#include "../include/handle_error.cuh"
#include "../include/rapmusic_kernel.cuh"
#include "../include/cuhpcvalue.cuh"
//*************************************************************************************************************
//=============================================================================================================
// CPP INCLUDES
//=============================================================================================================
#include "../../cpp/include/eigeninterface.h"
#include "../../cpp/include/model.h"
#include "../../cpp/include/rapdipoles.h"
//*************************************************************************************************************
//=============================================================================================================
// DEFINE NAMESPACE HPCLib
//=============================================================================================================
namespace HPCLib
{
//*************************************************************************************************************
//=============================================================================================================
// USED NAMESPACES
//=============================================================================================================
//*************************************************************************************************************
//=============================================================================================================
// DEFINE MEMBER METHODS
//=============================================================================================================
RapMusic_Cuda::RapMusic_Cuda()
: m_iPairCols(6)
, m_iMaxBlocksPerMultiProcessor(8) //CUDA C Programming Guide - Appendix F
{
}
//*************************************************************************************************************
RapMusic_Cuda::~RapMusic_Cuda()
{
m_host_pLeadFieldMat = NULL;
//garbage collecting
//######## CUDA START ########
// free the memory allocated on the GPU
/*HANDLE_ERROR( cudaFree( m_dev_pLeadFieldMat ) );*/
delete m_dev_pLeadFieldMat;
delete m_dev_pVecPairIdxCombinations;
//######## CUDA END ########
}
//*************************************************************************************************************
//template <class T>
bool RapMusic_Cuda::initRAPMusic( HPCLib::CudaDevice* p_pDeviceInfo,
HPCLib::Model<float>* p_pModel,
bool p_bSparsed, int p_iN, double p_dThr)
{
return initRAPMusic(p_pDeviceInfo,
p_bSparsed ? p_pModel->getSparsedLeadFieldMat() : p_pModel->getLeadFieldMat(),
p_bSparsed ? p_pModel->getSparsedGridMat() : p_pModel->getGridMat(),
p_iN, p_dThr);
}
//*************************************************************************************************************
//template <class T>
bool RapMusic_Cuda::initRAPMusic( HPCLib::CudaDevice* p_pDeviceInfo,
HPCMatrix<float>* p_pMatLeadField,
HPCMatrix<float>* p_pMatGrid,
int p_iN, double p_dThr)
{
m_iMultiProcessorCount = p_pDeviceInfo->getSelectedDeviceProperties().multiProcessorCount;//14;
m_iWarpSize = p_pDeviceInfo->getSelectedDeviceProperties().warpSize;//32;
m_iMaxThreadsPerMultiProcessor = p_pDeviceInfo->getSelectedDeviceProperties().maxThreadsPerMultiProcessor;//1536;
m_iSharedMemoryPerMultiProcessor = p_pDeviceInfo->getSelectedDeviceProperties().sharedMemPerBlock;//48*1024;
cublasStatus status = cublasInit ();
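//legacy cuBLAS (v1) API initialization; the returned status is not checked here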
//Initialize RAP-MUSIC
std::cout << "##### Initialization CUDA RAP MUSIC started ######\n\n";
m_iN = p_iN;
m_dThreshold = p_dThr;
//Grid check
if(p_pMatGrid != NULL)
{
if ( p_pMatGrid->rows() != p_pMatLeadField->cols() / 3 )
{
std::cout << "Grid does not fit to given Lead Field!\n";
return false;
}
}
m_pMatGrid = p_pMatGrid;
//Lead Field check
if ( p_pMatLeadField->cols() % 3 != 0 )
{
std::cout << "Lead Field is not associated with a 3D grid!\n";
return false;
}
m_pMatLeadField = p_pMatLeadField;
m_dev_pLeadFieldMat = new cuHPCMatrix<float>(*p_pMatLeadField);//### CUDA ###
m_iNumGridPoints = (int)(m_dev_pLeadFieldMat->cols()/3);
m_iNumChannels = m_dev_pLeadFieldMat->rows();
//##### Calc lead field combination #####
std::cout << "Calculate lead field combinations. \n";
m_iNumLeadFieldCombinations = nchoose2(m_iNumGridPoints+1);
//######## CUDA START ########
// allocate device vector
m_dev_pVecPairIdxCombinations = new thrust::device_vector<int>(2 * m_iNumLeadFieldCombinations);
// obtain raw pointer to device vector’s memory -> for usage in kernel
m_dev_pPairIdxCombinations = thrust::raw_pointer_cast(&(*m_dev_pVecPairIdxCombinations)[0]);
cuCalcPairCombinations<<<128,1>>>( m_iNumGridPoints, m_iNumLeadFieldCombinations, m_dev_pPairIdxCombinations);
//######## CUDA END ########
std::cout << "Lead Field combinations calculated. \n\n";
//##### Calc lead field combination end #####
std::cout << "Number of grid points: " << m_iNumGridPoints << "\n\n";
std::cout << "Number of combinated points: " << m_iNumLeadFieldCombinations << "\n\n";
std::cout << "Number of sources to find: " << m_iN << "\n\n";
std::cout << "Threshold: " << m_dThreshold << "\n\n";
//Init end
std::cout << "##### Initialization CUDA RAP MUSIC completed ######\n\n\n";
m_bIsInit = true;
return m_bIsInit;
}
//*************************************************************************************************************
bool RapMusic_Cuda::calcRapMusic(HPCMatrix<float>* p_pMatMeasurement, RapDipoles<float>*& p_pRapDipoles)
{
//if not initialized -> break
if(!m_bIsInit)
{
std::cout << "RAP-Music wasn't initialized!"; //ToDo: catch this earlier
return false;
}
//Test if data are correct
if(p_pMatMeasurement->rows() != m_iNumChannels)
{
std::cout << "Lead Field channels do not fit to number of measurement channels!"; //ToDo: catch this earlier
return false;
}
// //Inits
// //Stop the time for benchmark purpose
// clock_t start, end;
// start = clock();
//Calculate the signal subspace (t_dev_pMatPhi_s)
cuHPCMatrix<float>* t_dev_pMatPhi_s = NULL;//(m_iNumChannels, t_r < m_iN ? m_iN : t_r);
//separate kernel for calcPhi_s -> not possible because measurement is often too big for shared memory
int t_r = calcPhi_s(*p_pMatMeasurement, t_dev_pMatPhi_s);
int t_iMaxSearch = m_iN < t_r ? m_iN : t_r; //The smallest of Rank and Iterations
if (t_r < m_iN)
{
std::cout << "Warning: Rank " << t_r << " of the measurement data is smaller than the " << m_iN;
std::cout << " sources to find." << std::endl;
std::cout << " Searching now for " << t_iMaxSearch << " correlated sources.";
std::cout << std::endl << std::endl;
}
//Create Orthogonal Projector
//OrthProj
HPCMatrix<float> t_matOrthProj(m_iNumChannels,m_iNumChannels);
t_matOrthProj.setIdentity();
cuHPCMatrix<float>* t_dev_pMatOrthProj = new cuHPCMatrix<float>(t_matOrthProj);//### CUDA ###
//A_k_1
HPCMatrix<float> t_matA_k_1(m_iNumChannels,t_iMaxSearch);
t_matA_k_1.reset(0.0);//setZero();
if (m_pMatGrid != NULL)
{
if(p_pRapDipoles != NULL)
p_pRapDipoles->initRapDipoles(m_pMatGrid);
else
p_pRapDipoles = new RapDipoles<float>(m_pMatGrid);
}
else
{
if(p_pRapDipoles != NULL)
delete p_pRapDipoles;
p_pRapDipoles = new RapDipoles<float>();
}
std::cout << "##### Calculation of CUDA RAP MUSIC started ######\n\n";
cuHPCMatrix<float>* t_dev_pMatProj_Phi_s = new cuHPCMatrix<float>(t_matOrthProj.rows(), t_dev_pMatPhi_s->cols());//### CUDA ###
//new Version: Calculate projection before
HPCMatrix<float> t_matProj_LeadField(m_dev_pLeadFieldMat->rows(), m_dev_pLeadFieldMat->cols());
cuHPCMatrix<float>* t_dev_pMatProj_LeadField = new cuHPCMatrix<float>(m_dev_pLeadFieldMat->rows(), m_dev_pLeadFieldMat->cols());//### CUDA ###
for(int r = 0; r < t_iMaxSearch; ++r)
{
//ToDo: check whether using the device pointer is performant
//t_dev_pMatProj_Phi_s = t_dev_pMatOrthProj*t_dev_pMatPhi_s;
t_dev_pMatProj_Phi_s->cuHPCMatMult('N', 'N',*t_dev_pMatOrthProj,*t_dev_pMatPhi_s);//### CUDA ###
//new Version: Calculating Projection before -> ToDo remove this later on
t_matProj_LeadField = t_matOrthProj * (*m_pMatLeadField);//Subtract the found sources from the current found source
//t_dev_pMatProj_LeadField = t_dev_pMatOrthProj*m_dev_pLeadFieldMat;
t_dev_pMatProj_LeadField->cuHPCMatMult('N', 'N',*t_dev_pMatOrthProj,*m_dev_pLeadFieldMat);//### CUDA ###
//###First Option###
//Step 1 (according to Mosher 1998): tmp_Proj_Phi_S may already be orthogonal -> then no SVD is needed -> U_B = tmp_Proj_Phi_S;
cuHPCMatrix<float>* t_dev_pMatU_B = new cuHPCMatrix<float>(t_dev_pMatPhi_s->rows(), t_dev_pMatPhi_s->cols());//### CUDA ###
cuHPCValue<int> t_dev_iRank(0);//### CUDA ###
int t_iTh_y = 8;//16; //ToDo: More than 8 threads - wrong results
int t_iTh_z = 2;//1;
int t_iMatSize_U_B = t_dev_pMatPhi_s->rows() * t_dev_pMatPhi_s->cols();
int t_iWMatSize_U_B = t_dev_pMatPhi_s->cols();
int t_iCacheYZSize_U_B = t_iTh_y*t_iTh_z;
int t_iSVDCache_U_B = t_dev_pMatPhi_s->cols()+1+1;//rv1[m_iPairCols]; scale; s
size_t t_iSharedMem_U_B = sizeof(float) * (t_iMatSize_U_B + t_iWMatSize_U_B + t_iCacheYZSize_U_B + t_iSVDCache_U_B);
dim3 blocks = dim3( 1, 1);
dim3 threads = dim3( 1, t_iTh_y, t_iTh_z);
cuCalcU_B<<< blocks, threads, t_iSharedMem_U_B >>>
( t_dev_pMatProj_Phi_s->data(),
t_dev_pMatProj_Phi_s->rows(),
t_dev_pMatProj_Phi_s->cols(),
t_dev_pMatU_B->data(),
t_dev_iRank.data());
HANDLE_ERROR( cudaThreadSynchronize() ); //to ensure that the kernel has completed
int t_iFullRank_U_B = t_dev_iRank.toHostValue();
HPCMatrix<float> t_matU_B(t_dev_pMatProj_Phi_s->rows(), t_iFullRank_U_B);
// copy the array back from the GPU to the CPU
HANDLE_ERROR( cudaMemcpy( t_matU_B.data(), t_dev_pMatU_B->data(),
t_iFullRank_U_B * t_dev_pMatProj_Phi_s->rows() * sizeof(float),
cudaMemcpyDeviceToHost ) );
//ToDo - better to resize - drop no longer needed columns
delete t_dev_pMatU_B;
t_dev_pMatU_B = new cuHPCMatrix<float>(t_matU_B);//### CUDA ###
//######## CUDA START ########
// allocate device vector
thrust::device_vector<float> t_dev_vecRoh(m_iNumLeadFieldCombinations);
// obtain raw pointer to device vector’s memory -> for usage in kernel
float * t_dev_pRoh = thrust::raw_pointer_cast(&t_dev_vecRoh[0]);
//######## CUDA END ########
// subcorr GPU
//######## CUDA START ########
cudaEvent_t start, stop;
float elapsedTime;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
t_iTh_y = 8;//16; //ToDo: More than 8 threads - wrong results
t_iTh_z = 2;//1;
int t_iPairMatSize = m_dev_pLeadFieldMat->rows() * m_iPairCols;
int t_iWMatSize = m_iPairCols;
int t_iCorMatSize = m_iPairCols*t_iFullRank_U_B;
int t_iCacheYZSize = t_iTh_y*t_iTh_z;
int t_iSVDCache = m_iPairCols+1+1;//rv1[m_iPairCols]; scale; s
size_t t_iSharedMemPerPairMat = sizeof(float) * (t_iPairMatSize + t_iWMatSize + t_iCorMatSize + t_iCacheYZSize + t_iSVDCache);
int t_iPairMatsPerMultiProcessor = m_iSharedMemoryPerMultiProcessor/t_iSharedMemPerPairMat;
std::cout << "Shared Memory Usage: " << t_iSharedMemPerPairMat << " Byte x " << t_iPairMatsPerMultiProcessor << std::endl;
int t_iPairMatsPerBlock = ceil((float)(t_iPairMatsPerMultiProcessor)/(float)m_iMaxBlocksPerMultiProcessor);//=threadDim.x
/*DIRTY HACK*/ t_iPairMatsPerBlock = 2;//t_iPairMatsPerBlock > 2 ? 2 : t_iPairMatsPerBlock;//ToDo Debug when 3 Mats per Block we get the wrong result
std::cout << "Pair Mats per Block: " << t_iPairMatsPerBlock << std::endl;
size_t t_iSharedMemPerBlock = t_iSharedMemPerPairMat * t_iPairMatsPerBlock;
blocks = dim3( /*7381*/ 64*m_iMultiProcessorCount, 1);
threads = dim3( t_iPairMatsPerBlock, t_iTh_y, t_iTh_z);
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
RapMusicSubcorr<<<blocks, threads, t_iSharedMemPerBlock>>>
( t_dev_pMatProj_LeadField->data(),
t_dev_pMatProj_LeadField->rows(),
t_dev_pMatProj_LeadField->cols(),
m_dev_pPairIdxCombinations,
m_iNumLeadFieldCombinations,
t_dev_pMatU_B->data(),
t_iFullRank_U_B,
t_dev_pRoh );
HANDLE_ERROR( cudaThreadSynchronize() ); //to ensure that the kernel has completed
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
// free events
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
std::cout << "Time Elapsed: " << elapsedTime << " ms" << std::endl;
//######## CUDA END ########
//Find the maximum of correlation
//######## CUDA THRUST START ########
//max_element returns an iterator, so to convert that into a position we subtract the iterator at the beginning of the vector.
int t_iMaxIdx = thrust::max_element(t_dev_vecRoh.begin(), t_dev_vecRoh.end()) - t_dev_vecRoh.begin();
float t_val_roh_k = t_dev_vecRoh[t_iMaxIdx];
//######## THRUST CUDA END ########
//get positions in sparsed leadfield from index combinations;
int t_iIdx1 = (*m_dev_pVecPairIdxCombinations)[2*t_iMaxIdx];
int t_iIdx2 = (*m_dev_pVecPairIdxCombinations)[2*t_iMaxIdx+1];
// (Idx+1) because of MATLAB positions -> starting with 1 not with 0
std::cout << "Iteration: " << r+1 << " of " << t_iMaxSearch
<< "; Correlation: " << t_val_roh_k<< "; Position (Idx+1): " << t_iIdx1+1 << " - " << t_iIdx2+1 <<"\n\n";
//Calculations with the max correlated dipole pair G_k_1
HPCMatrix<float> t_matG_k_1(t_matProj_LeadField.rows(),6);
getLeadFieldPair(*m_pMatLeadField, t_matG_k_1, t_iIdx1, t_iIdx2);
HPCMatrix<float> t_matProj_G_k_1(t_matOrthProj.rows(), t_matG_k_1.cols());
t_matProj_G_k_1 = t_matOrthProj * t_matG_k_1;//Subtract the found sources from the current found source
//Calculate source direction
//source direction (p_pMatPhi) for current source r (phi_k_1)
HPCMatrix<float> t_vec_phi_k_1(6, 1);
subcorr(t_matProj_G_k_1, t_matU_B, t_vec_phi_k_1);//Correlate the current source to calculate the direction
//Set return values
p_pRapDipoles->insertSource(t_iIdx1, t_iIdx2, t_vec_phi_k_1.data(), t_val_roh_k);
//Stop searching when the correlation is smaller than the threshold
if (t_val_roh_k < m_dThreshold)
{
std::cout << "Searching stopped, last correlation " << t_val_roh_k;
std::cout << " is smaller then the given threshold " << m_dThreshold << std::endl << std::endl;
break;
}
//Calculate A_k_1 = [a_theta_1..a_theta_k_1] matrix for subtraction of found source
calcA_k_1(t_matG_k_1, t_vec_phi_k_1, r, t_matA_k_1);
//Calculate new orthogonal Projector (Pi_k_1)
calcOrthProj(t_matA_k_1, t_matOrthProj);
//#### CUDA START ####
HANDLE_ERROR( cudaMemcpy( t_dev_pMatOrthProj->data(),
t_matOrthProj.data(),
sizeof(float) * t_matOrthProj.size(),
cudaMemcpyHostToDevice ) );
//#### CUDA END ####
//garbage collecting
// free the memory allocated on the GPU
delete t_dev_pMatU_B;
// free the memory we allocated on the CPU
}
//garbage collecting
// free the memory allocated on the GPU
delete t_dev_pMatProj_LeadField;
delete t_dev_pMatProj_Phi_s;
delete t_dev_pMatOrthProj;
delete t_dev_pMatPhi_s;
// free the memory we allocated on the CPU
std::cout << "##### Calculation of CUDA RAP MUSIC completed ######\n\n";
// end = clock();
//
// float t_fElapsedTime = ( (float)(end-start) / (float)CLOCKS_PER_SEC ) * 1000.0f;
// std::cout << "Time Elapsed: " << t_fElapsedTime << " ms" << std::endl << std::endl;
//garbage collecting
//ToDo
return true;
}
//*************************************************************************************************************
bool RapMusic_Cuda::calcPowellRAPMusic(HPCMatrix<float>* p_pMatMeasurement, RapDipoles<float>*& p_pRapDipoles)
{
//if not initialized -> break
if(!m_bIsInit)
{
std::cout << "RAP-Music wasn't initialized!"; //ToDo: catch this earlier
return false;
}
//Test if data are correct
if(p_pMatMeasurement->rows() != m_iNumChannels)
{
std::cout << "Lead Field channels do not fit to number of measurement channels!"; //ToDo: catch this earlier
return false;
}
// //Inits
// //Stop the time for benchmark purpose
// clock_t start, end;
// start = clock();
//Calculate the signal subspace (t_dev_pMatPhi_s)
cuHPCMatrix<float>* t_dev_pMatPhi_s = NULL;//(m_iNumChannels, t_r < m_iN ? m_iN : t_r);
//separate kernel for calcPhi_s -> not possible because measurement is often too big for shared memory
int t_r = calcPhi_s(*p_pMatMeasurement, t_dev_pMatPhi_s);
int t_iMaxSearch = m_iN < t_r ? m_iN : t_r; //The smallest of Rank and Iterations
if (t_r < m_iN)
{
std::cout << "Warning: Rank " << t_r << " of the measurement data is smaller than the " << m_iN;
std::cout << " sources to find." << std::endl;
std::cout << " Searching now for " << t_iMaxSearch << " correlated sources.";
std::cout << std::endl << std::endl;
}
//Create Orthogonal Projector
//OrthProj
HPCMatrix<float> t_matOrthProj(m_iNumChannels,m_iNumChannels);
t_matOrthProj.setIdentity();
cuHPCMatrix<float>* t_dev_pMatOrthProj = new cuHPCMatrix<float>(t_matOrthProj);//### CUDA ###
//A_k_1
HPCMatrix<float> t_matA_k_1(m_iNumChannels,t_iMaxSearch);
t_matA_k_1.reset(0.0);//setZero();
if (m_pMatGrid != NULL)
{
if(p_pRapDipoles != NULL)
p_pRapDipoles->initRapDipoles(m_pMatGrid);
else
p_pRapDipoles = new RapDipoles<float>(m_pMatGrid);
}
else
{
if(p_pRapDipoles != NULL)
delete p_pRapDipoles;
p_pRapDipoles = new RapDipoles<float>();
}
std::cout << "##### Calculation of CUDA RAP MUSIC started ######\n\n";
cuHPCMatrix<float>* t_dev_pMatProj_Phi_s = new cuHPCMatrix<float>(t_matOrthProj.rows(), t_dev_pMatPhi_s->cols());//### CUDA ###
//new Version: Calculate projection before
HPCMatrix<float> t_matProj_LeadField(m_dev_pLeadFieldMat->rows(), m_dev_pLeadFieldMat->cols());
cuHPCMatrix<float>* t_dev_pMatProj_LeadField = new cuHPCMatrix<float>(m_dev_pLeadFieldMat->rows(), m_dev_pLeadFieldMat->cols());//### CUDA ###
for(int r = 0; r < t_iMaxSearch; ++r)
{
//ToDo: check whether using the device pointer is performant
//t_dev_pMatProj_Phi_s = t_dev_pMatOrthProj*t_dev_pMatPhi_s;
t_dev_pMatProj_Phi_s->cuHPCMatMult('N', 'N',*t_dev_pMatOrthProj,*t_dev_pMatPhi_s);//### CUDA ###
//new Version: Calculating Projection before -> ToDo remove this later on
t_matProj_LeadField = t_matOrthProj * (*m_pMatLeadField);//Subtract the found sources from the current found source
//t_dev_pMatProj_LeadField = t_dev_pMatOrthProj*m_dev_pLeadFieldMat;
t_dev_pMatProj_LeadField->cuHPCMatMult('N', 'N',*t_dev_pMatOrthProj,*m_dev_pLeadFieldMat);//### CUDA ###
//###First Option###
//Step 1: lt. Mosher 1998 -> Maybe tmp_Proj_Phi_S is already orthogonal -> so no SVD needed -> U_B = tmp_Proj_Phi_S;
cuHPCMatrix<float>* t_dev_pMatU_B = new cuHPCMatrix<float>(t_dev_pMatPhi_s->rows(), t_dev_pMatPhi_s->cols());//### CUDA ###
cuHPCValue<int> t_dev_iRank(0);//### CUDA ###
int t_iTh_y = 8;//16; //ToDo: More than 8 threads - wrong results
int t_iTh_z = 2;//1;
int t_iMatSize_U_B = t_dev_pMatPhi_s->rows() * t_dev_pMatPhi_s->cols();
int t_iWMatSize_U_B = t_dev_pMatPhi_s->cols();
int t_iCacheYZSize_U_B = t_iTh_y*t_iTh_z;
int t_iSVDCache_U_B = t_dev_pMatPhi_s->cols()+1+1;//rv1[m_iPairCols]; scale; s
size_t t_iSharedMem_U_B = sizeof(float) * (t_iMatSize_U_B + t_iWMatSize_U_B + t_iCacheYZSize_U_B + t_iSVDCache_U_B);
dim3 blocks = dim3( 1, 1);
dim3 threads = dim3( 1, t_iTh_y, t_iTh_z);
cuCalcU_B<<< blocks, threads, t_iSharedMem_U_B >>>
( t_dev_pMatProj_Phi_s->data(),
t_dev_pMatProj_Phi_s->rows(),
t_dev_pMatProj_Phi_s->cols(),
t_dev_pMatU_B->data(),
t_dev_iRank.data());
HANDLE_ERROR( cudaThreadSynchronize() ); //to ensure that the kernel has completed
int t_iFullRank_U_B = t_dev_iRank.toHostValue();
HPCMatrix<float> t_matU_B(t_dev_pMatProj_Phi_s->rows(), t_iFullRank_U_B);
// copy the array back from the GPU to the CPU
HANDLE_ERROR( cudaMemcpy( t_matU_B.data(), t_dev_pMatU_B->data(),
t_iFullRank_U_B * t_dev_pMatProj_Phi_s->rows() * sizeof(float),
cudaMemcpyDeviceToHost ) );
//ToDo - better to resize - drop no longer needed columns
delete t_dev_pMatU_B;
t_dev_pMatU_B = new cuHPCMatrix<float>(t_matU_B);//### CUDA ###
//######## CUDA START ########
// allocate device vector
thrust::device_vector<float> t_dev_vecRoh(m_iNumLeadFieldCombinations);
// obtain raw pointer to device vector’s memory -> for usage in kernel
float * t_dev_pRoh = thrust::raw_pointer_cast(&t_dev_vecRoh[0]);
// allocate device vector
thrust::device_vector<int> t_dev_vecRowIndezes(m_iNumGridPoints);
// obtain raw pointer to device vector’s memory -> for usage in kernel
int * t_dev_pRowIndezes = thrust::raw_pointer_cast(&t_dev_vecRowIndezes[0]);
//######## CUDA END ########
// subcorr GPU
//Powell
int t_iCurrentRow = 2;
int t_iIdx1 = -1;
int t_iIdx2 = -1;
int t_iMaxIdx_old = -1;
int t_iMaxIdx = -1;
int t_iMaxFound = 0;
float t_val_roh_k = 0;
int t_iNumVecElements = m_iNumGridPoints;
while(t_iMaxFound == 0)
{
//######## CUDA START ########
cudaEvent_t start, stop;
float elapsedTime;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
//Powell indices
cuPowellIdxVec<<<32, 32>>>( t_iCurrentRow,
t_iNumVecElements,
t_dev_pRowIndezes );
HANDLE_ERROR( cudaThreadSynchronize() ); //to ensure that the kernel has completed
// //DEBUG
// thrust::host_vector<int> h_vec(m_iNumGridPoints);
// // transfer data back to host
// thrust::copy(t_dev_vecRowIndezes.begin(), t_dev_vecRowIndezes.end(), h_vec.begin());
// std::cout << "indezes" << std::endl;
// for(int i = 0; i < 10; ++i)
// std::cout << h_vec[i] << std::endl;
// //DEBUG
t_iTh_y = 8;//16; //ToDo: More than 8 threads - wrong results
t_iTh_z = 2;//1;
int t_iPairMatSize = m_dev_pLeadFieldMat->rows() * m_iPairCols;
int t_iWMatSize = m_iPairCols;
int t_iCorMatSize = m_iPairCols*t_iFullRank_U_B;
int t_iCacheYZSize = t_iTh_y*t_iTh_z;
int t_iSVDCache = m_iPairCols+1+1;//rv1[m_iPairCols]; scale; s
size_t t_iSharedMemPerPairMat = sizeof(float) * (t_iPairMatSize + t_iWMatSize + t_iCorMatSize + t_iCacheYZSize + t_iSVDCache);
int t_iPairMatsPerMultiProcessor = m_iSharedMemoryPerMultiProcessor/t_iSharedMemPerPairMat;
std::cout << "Shared Memory Usage: " << t_iSharedMemPerPairMat << " Byte x " << t_iPairMatsPerMultiProcessor << std::endl;
int t_iPairMatsPerBlock = ceil((float)(t_iPairMatsPerMultiProcessor)/(float)m_iMaxBlocksPerMultiProcessor);//=threadDim.x
/*DIRTY HACK*/ t_iPairMatsPerBlock = 2;//t_iPairMatsPerBlock > 2 ? 2 : t_iPairMatsPerBlock;//ToDo: debug - with 3 mats per block we get the wrong result
std::cout << "Pair Mats per Block: " << t_iPairMatsPerBlock << std::endl;
size_t t_iSharedMemPerBlock = t_iSharedMemPerPairMat * t_iPairMatsPerBlock;
blocks = dim3( /*7381*/ 64*m_iMultiProcessorCount, 1);
threads = dim3( t_iPairMatsPerBlock, t_iTh_y, t_iTh_z);
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
PowellRapMusicSubcorr<<<blocks, threads, t_iSharedMemPerBlock>>>
( t_dev_pMatProj_LeadField->data(),
t_dev_pMatProj_LeadField->rows(),
t_dev_pMatProj_LeadField->cols(),
m_dev_pPairIdxCombinations,
t_dev_pRowIndezes,
t_iNumVecElements,
t_dev_pMatU_B->data(),
t_iFullRank_U_B,
t_dev_pRoh );
HANDLE_ERROR( cudaThreadSynchronize() ); //to ensure that the kernel has completed
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
// free events
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
std::cout << "Time Elapsed: " << elapsedTime << " ms" << std::endl;
//######## CUDA END ########
//Find the maximum of correlation
//######## CUDA THRUST START ########
//max_element returns an iterator, so to convert that into a position we subtract the iterator at the beginning of the vector.
t_iMaxIdx = thrust::max_element(t_dev_vecRoh.begin(), t_dev_vecRoh.end()) - t_dev_vecRoh.begin();
t_val_roh_k = t_dev_vecRoh[t_iMaxIdx];
//######## THRUST CUDA END ########
//Powell
if(t_iMaxIdx == t_iMaxIdx_old)
{
t_iMaxFound = 1;
break;
}
else
{
t_iMaxIdx_old = t_iMaxIdx;
//get positions in the sparse lead field from the index combinations
t_iIdx1 = (*m_dev_pVecPairIdxCombinations)[2*t_iMaxIdx];
t_iIdx2 = (*m_dev_pVecPairIdxCombinations)[2*t_iMaxIdx+1];
}
//set new index
if(t_iIdx1 == t_iCurrentRow)
t_iCurrentRow = t_iIdx2;
else
t_iCurrentRow = t_iIdx1;
}
// (Idx+1) because of MATLAB positions -> starting with 1 not with 0
std::cout << "Iteration: " << r+1 << " of " << t_iMaxSearch
<< "; Correlation: " << t_val_roh_k<< "; Position (Idx+1): " << t_iIdx1+1 << " - " << t_iIdx2+1 <<"\n\n";
//Calculations with the max correlated dipole pair G_k_1
HPCMatrix<float> t_matG_k_1(t_matProj_LeadField.rows(),6);
getLeadFieldPair(*m_pMatLeadField, t_matG_k_1, t_iIdx1, t_iIdx2);
HPCMatrix<float> t_matProj_G_k_1(t_matOrthProj.rows(), t_matG_k_1.cols());
t_matProj_G_k_1 = t_matOrthProj * t_matG_k_1;//Subtract the found sources from the current found source
//Calculate source direction
//source direction (p_pMatPhi) for current source r (phi_k_1)
HPCMatrix<float> t_vec_phi_k_1(6, 1);
subcorr(t_matProj_G_k_1, t_matU_B, t_vec_phi_k_1);//Correlate the current source to calculate the direction
//Set return values
p_pRapDipoles->insertSource(t_iIdx1, t_iIdx2, t_vec_phi_k_1.data(), t_val_roh_k);
//Stop searching when the correlation is smaller than the threshold
if (t_val_roh_k < m_dThreshold)
{
std::cout << "Searching stopped, last correlation " << t_val_roh_k;
std::cout << " is smaller then the given threshold " << m_dThreshold << std::endl << std::endl;
break;
}
//Calculate A_k_1 = [a_theta_1..a_theta_k_1] matrix for subtraction of found source
calcA_k_1(t_matG_k_1, t_vec_phi_k_1, r, t_matA_k_1);
//Calculate new orthogonal Projector (Pi_k_1)
calcOrthProj(t_matA_k_1, t_matOrthProj);
//#### CUDA START ####
HANDLE_ERROR( cudaMemcpy( t_dev_pMatOrthProj->data(),
t_matOrthProj.data(),
sizeof(float) * t_matOrthProj.size(),
cudaMemcpyHostToDevice ) );
//#### CUDA END ####
//garbage collecting
// free the memory allocated on the GPU
delete t_dev_pMatU_B;
// free the memory we allocated on the CPU
}
//garbage collecting
// free the memory allocated on the GPU
delete t_dev_pMatProj_LeadField;
delete t_dev_pMatProj_Phi_s;
delete t_dev_pMatOrthProj;
delete t_dev_pMatPhi_s;
// free the memory we allocated on the CPU
std::cout << "##### Calculation of CUDA RAP MUSIC completed ######\n\n";
// end = clock();
//
// float t_fElapsedTime = ( (float)(end-start) / (float)CLOCKS_PER_SEC ) * 1000.0f;
// std::cout << "Time Elapsed: " << t_fElapsedTime << " ms" << std::endl << std::endl;
//garbage collecting
//ToDo
return true;
}
//*************************************************************************************************************
int RapMusic_Cuda::nchoose2(int n)
{
//nchoosek(n, k) with k = 2, equals n*(n-1)*0.5
int t_iNumOfCombination = (int)(n*(n-1)*0.5);
return t_iNumOfCombination;
}
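//Illustrative example for the pair-count formula above (not part of the original
//implementation): for n = 4 grid points, nchoose2(4) = 4*3*0.5 = 6 dipole pairs,
//namely (0,1), (0,2), (0,3), (1,2), (1,3), (2,3).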
//*************************************************************************************************************
//template <class T>
int RapMusic_Cuda/*<T>*/::calcPhi_s(const HPCMatrix<float>& p_pMatMeasurement, cuHPCMatrix<float>* &p_dev_pMatPhi_s)
{
//Calculate p_dev_pMatPhi_s
HPCMatrix<float> t_matF;
if (p_pMatMeasurement.cols() > p_pMatMeasurement.rows())
{
t_matF = makeSquareMat(p_pMatMeasurement); //FF^T
}
else
{
t_matF = p_pMatMeasurement;
}
SVD phi_sSVD(t_matF, 1);
int t_r = getRank(phi_sSVD.singularValues());
int t_iCols = t_r; //t_r < m_iN ? m_iN : t_r;
if (p_dev_pMatPhi_s != NULL)
delete p_dev_pMatPhi_s;
//m_iNumChannels has to be equal to t_svdF.matrixU().rows()
p_dev_pMatPhi_s = new cuHPCMatrix<float>(m_iNumChannels, t_iCols);
//assign the signal subspace
// copy the array from the CPU to the GPU
HANDLE_ERROR( cudaMemcpy( p_dev_pMatPhi_s->data(), phi_sSVD.matrixU()->data(),
sizeof(float) * m_iNumChannels *t_iCols,
cudaMemcpyHostToDevice ) );
//ToDo: use jojos svd instead of cula
//garbage collecting
return t_r;
}
//*************************************************************************************************************
//Direction Subcorr
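//Summary of the computation below (subspace correlation after Mosher 1998):
//with P*G = U_A * Sigma_A * V_A^T and C = U_A^T * U_B = U_C * Sigma_C * V_C^T,
//the returned correlation is the largest singular value Sigma_C(0) and the
//source direction phi_k_1 is x / ||x|| with x = V_A * Sigma_A^-1 * U_C(:,0).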
float RapMusic_Cuda::subcorr(HPCMatrix<float>& p_matProj_G, HPCMatrix<float>& p_matU_B, HPCMatrix<float>& p_vec_phi_k_1)
{
//Orthogonality check omitted for performance reasons -> it is much faster without it
SVD t_svdProj_G(p_matProj_G, 3);
HPCMatrix<float> U_A_T(6, t_svdProj_G.matrixU()->rows());
U_A_T = t_svdProj_G.matrixU()->transpose();
HPCMatrix<float>* sigma_A = t_svdProj_G.singularValues();
HPCMatrix<float>* V_A = t_svdProj_G.matrixV();
//according to Mosher 1998 - ToDo: only retain those components of U_A and U_B that correspond to nonzero singular values
//for U_A and U_B the number of columns corresponds to their ranks
//-> reduce to rank only when directions aren't calculated, otherwise use the full U_A_T
HPCMatrix<float> Cor(6, p_matU_B.cols());
//Step 2: compute the subspace correlation
Cor = U_A_T*p_matU_B;//according to Mosher 1998: C = U_A^T * U_B
HPCMatrix<float>* t_vecSigma_C;
//Step 4
HPCMatrix<float>* U_C;
if (Cor.cols() > Cor.rows())
{
Cor = Cor.transpose();//adjoint(); //for complex data it has to be the adjoint
SVD svdOfCor_H(Cor, 2);
U_C = new HPCMatrix<float>(svdOfCor_H.matrixV()->rows(), svdOfCor_H.matrixV()->cols());
//because Cor is Hermitian, U and V are exchanged
memcpy(U_C->data(),svdOfCor_H.matrixV()->data(),(U_C->size()*sizeof(float)));
t_vecSigma_C = new HPCMatrix<float>(svdOfCor_H.singularValues()->rows(), svdOfCor_H.singularValues()->cols());
memcpy(t_vecSigma_C->data(),svdOfCor_H.singularValues()->data(),(t_vecSigma_C->size()*sizeof(float)));
}
else
{
SVD svdOfCor(Cor, 1);
U_C = new HPCMatrix<float>(svdOfCor.matrixU()->rows(), svdOfCor.matrixU()->cols());
memcpy(U_C->data(),svdOfCor.matrixU()->data(),(U_C->size()*sizeof(float)));
t_vecSigma_C = new HPCMatrix<float>(svdOfCor.singularValues()->rows(), svdOfCor.singularValues()->cols());
memcpy(t_vecSigma_C->data(),svdOfCor.singularValues()->data(),(t_vecSigma_C->size()*sizeof(float)));
}
//invert sigma A
HPCMatrix<float> sigma_a_inv(sigma_A->rows(), sigma_A->rows());
for (int i = 0; i < sigma_A->rows(); ++i)
{
sigma_a_inv(i,i) = 1/sigma_A->data()[i];
}
HPCMatrix<float> X(6,U_C->cols());
X = ((*V_A)*sigma_a_inv)*(*U_C);//X = V_A*Sigma_A^-1*U_C
float norm_X = 0;
for(int i = 0; i < 6; ++i)
norm_X += pow(X.data()[i], 2);
norm_X = 1/sqrt(norm_X);
//Multiply a scalar with an Array -> linear transform
for(int i = 0; i < 6; ++i)
p_vec_phi_k_1.data()[i] = X.data()[i]*norm_X;//u1 = x1/||x1|| this is the orientation
//Step 3
float ret_sigma_C;
ret_sigma_C = t_vecSigma_C->data()[0]; //Take only the correlation of the first principal components
//garbage collecting
delete U_C;
delete t_vecSigma_C;
return ret_sigma_C;
}
//*************************************************************************************************************
void RapMusic_Cuda::calcA_k_1( const HPCMatrix<float>& p_matG_k_1,
const HPCMatrix<float>& p_matPhi_k_1,
const int p_iIdxk_1,
HPCMatrix<float>& p_matA_k_1)
{
//Calculate A_k_1 = [a_theta_1..a_theta_k_1] matrix for subtraction of found source
HPCMatrix<float> t_vec_a_theta_k_1(p_matG_k_1.rows(),1);
t_vec_a_theta_k_1 = p_matG_k_1*p_matPhi_k_1; // a_theta_k_1 = G_k_1*phi_k_1 this corresponds to the normalized signal component in subspace r
memcpy( p_matA_k_1.data()+p_iIdxk_1*p_matA_k_1.rows(),
t_vec_a_theta_k_1.data(),
(p_matA_k_1.rows()*sizeof(float)));
}
//*************************************************************************************************************
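//The projector assembled below removes the subspace already spanned by the found
//sources A_k_1: OrthProj = I - A_k_1 * (A_k_1^T * A_k_1)^-1 * A_k_1^T. For complex
//data the transposes would have to be adjoints (see the inline comments).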
void RapMusic_Cuda::calcOrthProj(const HPCMatrix<float>& p_matA_k_1, HPCMatrix<float>& p_matOrthProj)
{
//Calculate OrthProj=I-A_k_1*(A_k_1'*A_k_1)^-1*A_k_1' //Wetterling -> A_k_1 = Gain
HPCMatrix<float> t_matA_k_1_tmp(p_matA_k_1.cols(), p_matA_k_1.cols());
t_matA_k_1_tmp = p_matA_k_1.transpose()/*adjoint()*/*p_matA_k_1;//A_k_1'*A_k_1 = A_k_1_tmp -> A_k_1' has to be adjoint for complex
int t_size = t_matA_k_1_tmp.cols();
while (!t_matA_k_1_tmp(t_size-1,t_size-1)) //shrink to the block of columns already filled in A_k_1 (remaining columns are still zero)
{
--t_size;
}
HPCMatrix<float> t_matA_k_1_tmp_inv(t_matA_k_1_tmp.rows(), t_matA_k_1_tmp.cols());
HPCMatrix<float> t_matA_k_1_tmpsubmat = t_matA_k_1_tmp.get(0,0,t_size,t_size);
LU t_matA_k_1_LU(&t_matA_k_1_tmpsubmat);
for(int i = 0; i < t_matA_k_1_tmpsubmat.rows(); ++i)
for(int j = 0; j < t_matA_k_1_tmpsubmat.cols(); ++j)
t_matA_k_1_tmp_inv(i,j) = t_matA_k_1_LU.invert()(i,j);//(A_k_1_tmp)^-1 = A_k_1_tmp_inv
t_matA_k_1_tmp.resize(p_matA_k_1.rows(), p_matA_k_1.cols());
t_matA_k_1_tmp = p_matA_k_1*t_matA_k_1_tmp_inv;//(A_k_1*A_k_1_tmp_inv) = A_k_1_tmp
HPCMatrix<float> t_matA_k_1_tmp2(p_matA_k_1.rows(), p_matA_k_1.rows());
t_matA_k_1_tmp2 = t_matA_k_1_tmp*p_matA_k_1.transpose();//adjoint();//(A_k_1_tmp)*A_k_1' -> here A_k_1' is only transposed - it has to be adjoint
HPCMatrix<float> I(m_iNumChannels,m_iNumChannels);
I.diag(1.0);//setIdentity();
p_matOrthProj = I-t_matA_k_1_tmp2; //OrthProj=I-A_k_1*(A_k_1'*A_k_1)^-1*A_k_1';
// //garbage collecting
// //ToDo
}
//*************************************************************************************************************
//ToDo don't make a real copy
void RapMusic_Cuda::getLeadFieldPair( HPCMatrix<float>& p_matLeadField,
HPCMatrix<float>& p_matLeadField_Pair,
int p_iIdx1, int p_iIdx2)
{
memcpy( p_matLeadField_Pair.data(),
p_matLeadField.data()+p_iIdx1*3*p_matLeadField.rows(),
(p_matLeadField.rows()*3*sizeof(float)));
memcpy( p_matLeadField_Pair.data()+3*p_matLeadField.rows(),
p_matLeadField.data()+p_iIdx2*3*p_matLeadField.rows(),
(p_matLeadField.rows()*3*sizeof(float)));
}
}//Namespace
|
ebfb7c050da1362934266411dddd425239ac7ce3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void spin(clock_t numClocks)
{
for (const clock_t threshold = clock() + numClocks; clock() < threshold;);
}
int main(int argc, char *argv[])
{
// Initialize constants.
const int numMilliseconds = 10;
const int numKernels = 2;
const int numStreams = 32;
// Get the major and minor compute capability version numbers.
int major, minor;
hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, 0);
hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, 0);
// Get the peak clock frequency in KHz of device 0.
int clockRate;
hipDeviceGetAttribute(&clockRate, hipDeviceAttributeClockRate, 0);
// Calculate the number of clocks within a certain period.
clock_t numClocks = clockRate * numMilliseconds;
// Create streams to enqueue kernels.
hipStream_t *streams = (hipStream_t*)malloc(sizeof(hipStream_t) * numStreams);
for (int s = 0; s < numStreams; ++s)
{
hipStreamCreate(&streams[s]);
}
// Create events to record timing data.
hipEvent_t beg, end;
hipEventCreate(&beg);
hipEventCreate(&end);
// Record an event in stream 0 before kernel invocations.
hipEventRecord(beg, 0);
// Enqueue kernels to streams.
for (int s = 0; s < numStreams; ++s)
{
for (int k = 0; k < numKernels; ++k)
{
hipLaunchKernelGGL(( spin), dim3(1), dim3(1), 0, streams[s], numClocks);
}
}
// Record an event in stream 0 after kernel invocations.
hipEventRecord(end, 0);
// Wait for the event to complete.
hipEventSynchronize(end);
// Compute the elapsed time between two events.
float elapsed;
hipEventElapsedTime(&elapsed, beg, end);
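// Note on the reference times printed below (derived from the launch pattern above,
// numStreams streams with numKernels kernels each): without concurrent kernel
// execution (SM <= 1.3) all numStreams * numKernels kernels are serialized; with a
// single hardware work queue (2.0 <= SM <= 3.0) roughly 1 + (numKernels - 1) * numStreams
// kernels end up serialized, because back-to-back launches in the same stream create
// false dependencies in the queue; with Hyper-Q (SM >= 3.5) the streams overlap and
// only the numKernels kernels of a single stream remain serialized.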
// Print the result.
printf("%d streams, each %d kernels, each %d ms\n", numStreams, numKernels, numMilliseconds);
printf(" SM <= 1.3:%4d ms\n", numMilliseconds * numKernels * numStreams);
printf("2.0 <= SM <= 3.0:%4d ms\n", numMilliseconds * (1 + (numKernels - 1) * numStreams));
printf("3.5 <= SM :%4d ms\n", numMilliseconds * numKernels);
printf(" SM == %d.%d:%4d ms\n", major, minor, (int)elapsed);
// Cleanup.
hipEventDestroy(end);
hipEventDestroy(beg);
for (int s = 0; s < numStreams; ++s)
{
hipStreamDestroy(streams[s]);
}
free(streams);
hipDeviceReset();
}
|
ebfb7c050da1362934266411dddd425239ac7ce3.cu
|
#include <stdio.h>
__global__ void spin(clock_t numClocks)
{
for (const clock_t threshold = clock() + numClocks; clock() < threshold;);
}
int main(int argc, char *argv[])
{
// Initialize constants.
const int numMilliseconds = 10;
const int numKernels = 2;
const int numStreams = 32;
// Get the major and minor compute capability version numbers.
int major, minor;
cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, 0);
cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, 0);
// Get the peak clock frequency in KHz of device 0.
int clockRate;
cudaDeviceGetAttribute(&clockRate, cudaDevAttrClockRate, 0);
// Calculate the number of clocks within a certain period.
clock_t numClocks = clockRate * numMilliseconds;
// Create streams to enqueue kernels.
cudaStream_t *streams = (cudaStream_t*)malloc(sizeof(cudaStream_t) * numStreams);
for (int s = 0; s < numStreams; ++s)
{
cudaStreamCreate(&streams[s]);
}
// Create events to record timing data.
cudaEvent_t beg, end;
cudaEventCreate(&beg);
cudaEventCreate(&end);
// Record an event in stream 0 before kernel invocations.
cudaEventRecord(beg, 0);
// Enqueue kernels to streams.
for (int s = 0; s < numStreams; ++s)
{
for (int k = 0; k < numKernels; ++k)
{
spin<<<1, 1, 0, streams[s]>>>(numClocks);
}
}
// Record an event in stream 0 after kernel invocations.
cudaEventRecord(end, 0);
// Wait for the event to complete.
cudaEventSynchronize(end);
// Compute the elapsed time between two events.
float elapsed;
cudaEventElapsedTime(&elapsed, beg, end);
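// Note on the reference times printed below (derived from the launch pattern above,
// numStreams streams with numKernels kernels each): without concurrent kernel
// execution (SM <= 1.3) all numStreams * numKernels kernels are serialized; with a
// single hardware work queue (2.0 <= SM <= 3.0) roughly 1 + (numKernels - 1) * numStreams
// kernels end up serialized, because back-to-back launches in the same stream create
// false dependencies in the queue; with Hyper-Q (SM >= 3.5) the streams overlap and
// only the numKernels kernels of a single stream remain serialized.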
// Print the result.
printf("%d streams, each %d kernels, each %d ms\n", numStreams, numKernels, numMilliseconds);
printf(" SM <= 1.3:%4d ms\n", numMilliseconds * numKernels * numStreams);
printf("2.0 <= SM <= 3.0:%4d ms\n", numMilliseconds * (1 + (numKernels - 1) * numStreams));
printf("3.5 <= SM :%4d ms\n", numMilliseconds * numKernels);
printf(" SM == %d.%d:%4d ms\n", major, minor, (int)elapsed);
// Cleanup.
cudaEventDestroy(end);
cudaEventDestroy(beg);
for (int s = 0; s < numStreams; ++s)
{
cudaStreamDestroy(streams[s]);
}
free(streams);
cudaDeviceReset();
}
|
2de45594a35fe804a914551d5d9de4d085ece6fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "operator/nrm2.h"
namespace cux {
extern __global__ void DotDeviceV2(const int len, const float *vec_a, const float *vec_b, float *res);
__global__ void Sqrt(const int n, float *x) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x) {
x[i] = __fsqrt_rn(x[i]);
}
}
// Fuse DotDeviceV2 and Sqrt.
void Nrm2DeviceV0(Config1D config, int n, const float *x, float *result) {
DotDeviceV2 << <config.blocks_per_grid,
config.threads_per_block,
config.shared_memory_size >> >
(n, x, x, result);
Sqrt << <1,1,1>> >(1, result);
}
void Nrm2::GpuKernelsSetup() {
gpu_kernels_.clear();
// Kernel v0
{
auto get_config = [&](int len) -> Config1D {
Config1D config;
config.threads_per_block = 1024;
config.blocks_per_grid = (len + config.threads_per_block - 1) / config.threads_per_block;
config.shared_memory_size = config.threads_per_block * sizeof(float);
return config;
};
auto func = [&](Config1D config, int n, const void *x, void *result) -> void {
Nrm2DeviceV0(config, n, (float *)x, (float *)result);
};
Nrm2GpuKernelIF *kernel = new Nrm2GpuKernelIF();
kernel->type_flag = TypeFlag::FLOAT32;
kernel->describe_info = "Shared memory / Loop unrolling";
kernel->get_config = get_config;
kernel->func = func;
kernel->config_kernel = DotDeviceV2;
gpu_kernels_.push_back(kernel);
}
// Kernel v1.
{
auto get_config = [&](int len) -> Config1D {
Config1D config;
return config;
};
auto func = [&](Config1D config, int n, const void *x, void *result) -> void {
// HIPBLAS_POINTER_MODE_DEVICE: Return data on device -> res is a pointer for device.
// HIPBLAS_POINTER_MODE_HOST: On host.
CUBLAS_CHECK(hipblasSetPointerMode(assistor_->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_CHECK(hipblasSnrm2(assistor_->cublas_handle(), n, (float *)x, 1, (float *)result));
};
Nrm2GpuKernelIF *kernel = new Nrm2GpuKernelIF();
kernel->type_flag = TypeFlag::FLOAT32;
kernel->describe_info = "Cublas";
kernel->get_config = get_config;
kernel->func = func;
kernel->config_kernel = nullptr;
gpu_kernels_.push_back(kernel);
}
}
}
|
2de45594a35fe804a914551d5d9de4d085ece6fa.cu
|
#include "operator/nrm2.h"
namespace cux {
extern __global__ void DotDeviceV2(const int len, const float *vec_a, const float *vec_b, float *res);
__global__ void Sqrt(const int n, float *x) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x) {
x[i] = __fsqrt_rn(x[i]);
}
}
// Fuse DotDeviceV2 and Sqrt.
void Nrm2DeviceV0(Config1D config, int n, const float *x, float *result) {
DotDeviceV2 << <config.blocks_per_grid,
config.threads_per_block,
config.shared_memory_size >> >
(n, x, x, result);
Sqrt << <1,1,1>> >(1, result);
}
void Nrm2::GpuKernelsSetup() {
gpu_kernels_.clear();
// Kernel v0
{
auto get_config = [&](int len) -> Config1D {
Config1D config;
config.threads_per_block = 1024;
config.blocks_per_grid = (len + config.threads_per_block - 1) / config.threads_per_block;
config.shared_memory_size = config.threads_per_block * sizeof(float);
return config;
};
auto func = [&](Config1D config, int n, const void *x, void *result) -> void {
Nrm2DeviceV0(config, n, (float *)x, (float *)result);
};
Nrm2GpuKernelIF *kernel = new Nrm2GpuKernelIF();
kernel->type_flag = TypeFlag::FLOAT32;
kernel->describe_info = "Shared memory / Loop unrolling";
kernel->get_config = get_config;
kernel->func = func;
kernel->config_kernel = DotDeviceV2;
gpu_kernels_.push_back(kernel);
}
// Kernel v1.
{
auto get_config = [&](int len) -> Config1D {
Config1D config;
return config;
};
auto func = [&](Config1D config, int n, const void *x, void *result) -> void {
// CUBLAS_POINTER_MODE_DEVICE: Return data on device -> res is a pointer for device.
// CUBLAS_POINTER_MODE_HOST: On host.
CUBLAS_CHECK(cublasSetPointerMode(assistor_->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_CHECK(cublasSnrm2(assistor_->cublas_handle(), n, (float *)x, 1, (float *)result));
};
Nrm2GpuKernelIF *kernel = new Nrm2GpuKernelIF();
kernel->type_flag = TypeFlag::FLOAT32;
kernel->describe_info = "Cublas";
kernel->get_config = get_config;
kernel->func = func;
kernel->config_kernel = nullptr;
gpu_kernels_.push_back(kernel);
}
}
}
|
ad01f2e9f89e60400550caa9abfb6fbf60d15714.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// M, N, K must form one of the WMMA fragment shapes supported for half inputs (16x16x16, 32x8x16, or 8x32x16)
#define MATRIX_M (8)
#define MATRIX_N (32)
#define MATRIX_K (16)
const int WMMA_M =8;
const int WMMA_N =32;
const int WMMA_K =16;
typedef half atype;
typedef half btype;
typedef float ctype;
typedef float dtype;
typedef float host_type;
#define A_LAYOUT ROW_MAJOR
#define B_LAYOUT ROW_MAJOR
#define C_LAYOUT ROW_MAJOR
#define D_LAYOUT ROW_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
enum MatrixInitializationType{
ZERO,
ONE,
RANDOM,
IDENTITY,
LINEAR
};
int get_value(MatrixInitializationType init_type,int randomRange=6,bool RESET=false){
static int val=0;
switch(init_type){
case ZERO:
break;
case ONE:
val=1;
break;
case RANDOM:
val=rand()%randomRange;
break;
case LINEAR:
val++;
break;
default :
printf("illegal MatrixInitializationType\n");
abort();
break;
}
if(RESET)
val=0;
return val;
}
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
T val;
if(layout==ROW_MAJOR)
val=matrix[row*col_size+col];
else
val=matrix[col*row_size+row];
printf("%.2f ",static_cast<float>(val));
}
printf(";\n");
}
}
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
if(init_type==IDENTITY){
assert(row_size==col_size);//can only be used for square matrices
matrix[row*row_size+col]=static_cast<T>(1);
}
else{
if(layout==ROW_MAJOR){
matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
}
else{
matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
}
}
}
}
get_value(init_type,10,true);//resetting the val counter
print_matrix<T>(matrix,row_size,col_size,layout);
}
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
int index=0;
if(layout==ROW_MAJOR){
index=row*col_size+col;
}
else{
index=col*row_size+row;
}
return index;
}
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
for(int row=0;row<M;row++){
for(int col=0;col<N;col++){
int rindex=get_index(row,col,M,N,resultlayout);
int cindex=get_index(row,col,M,N,clayout);
for(int k=0;k<K;k++){
int aindex=get_index(row,k,M,K,alayout);
int bindex=get_index(k,col,K,N,blayout);
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex];
}
result_matrix[rindex]+=matrix_c[cindex];
}
}
print_matrix<T>(result_matrix,M,N,resultlayout);
}
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
int index_a,index_b;
index_a=get_index(row,col,row_size,col_size,alayout);
index_b=get_index(row,col,row_size,col_size,alayout);
if(matrix_a[index_a]!=matrix_b[index_b])
printf("ERROR at index row=%d col=%d\n",row,col);
}
}
}
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
unsigned int start_time=0,end_time=0;
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
// Bounds checking
wmma::load_matrix_sync(a_frag, a, A_STRIDE);
wmma::load_matrix_sync(b_frag, b, B_STRIDE);
wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
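// Note: clock() in device code reads the per-multiprocessor cycle counter, so the
// "Time" printed at the end of this kernel is the number of GPU clock cycles spent
// in the single mma_sync call below, not wall-clock time.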
start_time=clock();
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
end_time=clock();
wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
printf("Time=%d\n",end_time-start_time);
}
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
//data on device in host type format
host_type *a_htype;
host_type *b_htype;
host_type *c_htype;
host_type *d_htype;
//data on device in gemm format
atype *a_atype;
btype *b_btype;
ctype *c_ctype;
dtype *d_dtype;
srand(time(NULL));
host_type *a_host_wmma;
host_type *b_host_wmma;
host_type *c_host_wmma;
host_type *d_host_wmma;
host_type *d_cal_host_wmma;
hipEvent_t startWMMA;
hipEvent_t stopWMMA;
cudaErrCheck(hipEventCreate(&startWMMA));
cudaErrCheck(hipEventCreate(&stopWMMA));
// Use tensor cores
cudaErrCheck(hipMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
cudaErrCheck(hipMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
cudaErrCheck(hipMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
cudaErrCheck(hipMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
printf("a_host\n");
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,RANDOM);
printf("b_host\n");
initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,RANDOM);
printf("c_host\n");
initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,ZERO);
printf("d_cal_host\n");
initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
printf("d_cal_host\n");
matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
cudaErrCheck(hipMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( convert<atype,host_type>) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_atype, a_htype, MATRIX_M * MATRIX_K);
hipLaunchKernelGGL(( convert<btype,host_type>) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_btype, b_htype, MATRIX_K * MATRIX_N);
hipLaunchKernelGGL(( convert<ctype,host_type>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_ctype, c_htype, MATRIX_M * MATRIX_N);
printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
printf("Running with wmma...\n");
cudaErrCheck(hipEventRecord(startWMMA));
hipLaunchKernelGGL(( wmma_example) , dim3(NUM_CTA),dim3(WARP_IN_CTA*THREAD_IN_WARP), 0, 0, a_atype, b_btype, c_ctype, d_dtype);
cudaErrCheck(hipEventRecord(stopWMMA));
hipLaunchKernelGGL(( convert<host_type,dtype>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, d_htype, d_dtype, MATRIX_M * MATRIX_N);
cudaErrCheck(hipEventSynchronize(stopWMMA));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(hipMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyDeviceToHost));
printf("Results verified: cublas and WMMA agree.\n\n");
float wmmaTime;
cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
printf("wmma took %.2fms\n", wmmaTime);
cudaErrCheck(hipEventDestroy(startWMMA));
cudaErrCheck(hipEventDestroy(stopWMMA));
printf("D_CALCULATED\n");
print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("D_WMMA\n");
print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("CHECKING\n");
compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
cudaErrCheck(hipFree(a_htype));
cudaErrCheck(hipFree(b_htype));
cudaErrCheck(hipFree(c_htype));
cudaErrCheck(hipFree(d_htype));
cudaErrCheck(hipFree(a_atype));
cudaErrCheck(hipFree(b_btype));
cudaErrCheck(hipFree(c_ctype));
cudaErrCheck(hipFree(d_dtype));
free(a_host_wmma);
free(b_host_wmma);
free(c_host_wmma);
free(d_host_wmma);
free(d_cal_host_wmma);
cudaErrCheck(hipDeviceReset());
return 0;
}
|
ad01f2e9f89e60400550caa9abfb6fbf60d15714.cu
|
#include <stdio.h>
#include <curand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// M, N, K must form one of the WMMA fragment shapes supported for half inputs (16x16x16, 32x8x16, or 8x32x16)
#define MATRIX_M (8)
#define MATRIX_N (32)
#define MATRIX_K (16)
const int WMMA_M =8;
const int WMMA_N =32;
const int WMMA_K =16;
typedef half atype;
typedef half btype;
typedef float ctype;
typedef float dtype;
typedef float host_type;
#define A_LAYOUT ROW_MAJOR
#define B_LAYOUT ROW_MAJOR
#define C_LAYOUT ROW_MAJOR
#define D_LAYOUT ROW_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
enum MatrixInitializationType{
ZERO,
ONE,
RANDOM,
IDENTITY,
LINEAR
};
int get_value(MatrixInitializationType init_type,int randomRange=6,bool RESET=false){
static int val=0;
switch(init_type){
case ZERO:
break;
case ONE:
val=1;
break;
case RANDOM:
val=rand()%randomRange;
break;
case LINEAR:
val++;
break;
default :
printf("illegal MatrixInitializationType\n");
abort();
break;
}
if(RESET)
val=0;
return val;
}
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
T val;
if(layout==ROW_MAJOR)
val=matrix[row*col_size+col];
else
val=matrix[col*row_size+row];
printf("%.2f ",static_cast<float>(val));
}
printf(";\n");
}
}
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
if(init_type==IDENTITY){
assert(row_size==col_size);//can only be used for square matrices
matrix[row*row_size+col]=static_cast<T>(1);
}
else{
if(layout==ROW_MAJOR){
matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
}
else{
matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
}
}
}
}
get_value(init_type,10,true);//resetting the val counter
print_matrix<T>(matrix,row_size,col_size,layout);
}
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
int index=0;
if(layout==ROW_MAJOR){
index=row*col_size+col;
}
else{
index=col*row_size+row;
}
return index;
}
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
for(int row=0;row<M;row++){
for(int col=0;col<N;col++){
int rindex=get_index(row,col,M,N,resultlayout);
int cindex=get_index(row,col,M,N,clayout);
for(int k=0;k<K;k++){
int aindex=get_index(row,k,M,K,alayout);
int bindex=get_index(k,col,K,N,blayout);
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex];
}
result_matrix[rindex]+=matrix_c[cindex];
}
}
print_matrix<T>(result_matrix,M,N,resultlayout);
}
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
int index_a,index_b;
index_a=get_index(row,col,row_size,col_size,alayout);
index_b=get_index(row,col,row_size,col_size,alayout);
if(matrix_a[index_a]!=matrix_b[index_b])
printf("ERROR at index row=%d col=%d\n",row,col);
}
}
}
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
unsigned int start_time=0,end_time=0;
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
// Bounds checking
wmma::load_matrix_sync(a_frag, a, A_STRIDE);
wmma::load_matrix_sync(b_frag, b, B_STRIDE);
wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
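// Note: clock() in device code reads the per-multiprocessor cycle counter, so the
// "Time" printed at the end of this kernel is the number of GPU clock cycles spent
// in the single mma_sync call below, not wall-clock time.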
start_time=clock();
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
end_time=clock();
wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
printf("Time=%d\n",end_time-start_time);
}
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
//data on device in host type format
host_type *a_htype;
host_type *b_htype;
host_type *c_htype;
host_type *d_htype;
//data on device in gemm format
atype *a_atype;
btype *b_btype;
ctype *c_ctype;
dtype *d_dtype;
srand(time(NULL));
host_type *a_host_wmma;
host_type *b_host_wmma;
host_type *c_host_wmma;
host_type *d_host_wmma;
host_type *d_cal_host_wmma;
cudaEvent_t startWMMA;
cudaEvent_t stopWMMA;
cudaErrCheck(cudaEventCreate(&startWMMA));
cudaErrCheck(cudaEventCreate(&stopWMMA));
// Use tensor cores
cudaErrCheck(cudaMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
cudaErrCheck(cudaMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
cudaErrCheck(cudaMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
cudaErrCheck(cudaMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
printf("a_host\n");
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,RANDOM);
printf("b_host\n");
initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,RANDOM);
printf("c_host\n");
initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,ZERO);
printf("d_cal_host\n");
initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
printf("d_cal_host\n");
matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
cudaErrCheck(cudaMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
convert<atype,host_type> <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_atype, a_htype, MATRIX_M * MATRIX_K);
convert<btype,host_type> <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_btype, b_htype, MATRIX_K * MATRIX_N);
convert<ctype,host_type> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (c_ctype, c_htype, MATRIX_M * MATRIX_N);
printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
printf("Running with wmma...\n");
cudaErrCheck(cudaEventRecord(startWMMA));
wmma_example <<< NUM_CTA,WARP_IN_CTA*THREAD_IN_WARP>>> (a_atype, b_btype, c_ctype, d_dtype);
cudaErrCheck(cudaEventRecord(stopWMMA));
convert<host_type,dtype> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (d_htype, d_dtype, MATRIX_M * MATRIX_N);
cudaErrCheck(cudaEventSynchronize(stopWMMA));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(cudaMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyDeviceToHost));
printf("Results verified: cublas and WMMA agree.\n\n");
float wmmaTime;
cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
printf("wmma took %.2fms\n", wmmaTime);
cudaErrCheck(cudaEventDestroy(startWMMA));
cudaErrCheck(cudaEventDestroy(stopWMMA));
printf("D_CALCULATED\n");
print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("D_WMMA\n");
print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("CHECKING\n");
compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
cudaErrCheck(cudaFree(a_htype));
cudaErrCheck(cudaFree(b_htype));
cudaErrCheck(cudaFree(c_htype));
cudaErrCheck(cudaFree(d_htype));
cudaErrCheck(cudaFree(a_atype));
cudaErrCheck(cudaFree(b_btype));
cudaErrCheck(cudaFree(c_ctype));
cudaErrCheck(cudaFree(d_dtype));
free(a_host_wmma);
free(b_host_wmma);
free(c_host_wmma);
free(d_host_wmma);
free(d_cal_host_wmma);
cudaErrCheck(cudaDeviceReset());
return 0;
}
|
9229aea83006bfecd7d4753f8e0bd21c5232a5bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompareT.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathCompareT.cu"
#include "../THCGenerateDoubleType.h"
|
9229aea83006bfecd7d4753f8e0bd21c5232a5bb.cu
|
#include "../THCTensorMathCompareT.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathCompareT.cu"
#include "../THCGenerateDoubleType.h"
|
e95311dd065473e3cc065af9464213f53bb0334a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <opencv2/cudaobjdetect.hpp>
#include <iostream>
#include <sys/time.h>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/cudaimgproc.hpp"
#include "opencv2/cudawarping.hpp"
#include <iomanip>
#include <hip/hip_runtime.h>
using namespace std;
using namespace cv;
//global call of cascade
void recog(string imageName, string mode)
{
//string cascadeName = "/home/ubuntu/Downloads/opencv/data/haarcascades_cuda/haarcascade_frontalface_alt2.xml";
string cascadeName = "haarcascade_frontalface_alt2.xml";
Ptr<cuda::CascadeClassifier> cascade = cuda::CascadeClassifier::create(cascadeName);
cv::CascadeClassifier cascadeCPU;
cascadeCPU.load(cascadeName);
if (cascade.empty() || cascadeCPU.empty()){
cerr << "Could not load model!" << endl;
// return -1;
}
Mat original = imread(imageName);
Mat gray = imread(imageName, CV_LOAD_IMAGE_GRAYSCALE);
Mat work_img = original.clone();
Mat copy = original.clone();
//pre processing
cvtColor(original, work_img, COLOR_RGB2GRAY);
equalizeHist(work_img, work_img);
bool findLargestObject = false;
bool filterRects = true;
if (mode == "--GPU")
{
cuda::GpuMat inputGPU(work_img);
cuda::GpuMat faces;
faces.create(1, 10000, cv::DataType<cv::Rect>::type); ///
cascade->setFindLargestObject(findLargestObject);
cascade->setScaleFactor(1.2);
cascade->setMinNeighbors((filterRects || findLargestObject) ? 4 : 0);
cascade->detectMultiScale(inputGPU, faces);
vector<Rect> objects;
cascade->convert(faces, objects);
for(int i=0;i<(int)objects.size();++i)
{
rectangle(original, objects[i], Scalar(0, 255, 0), 3);
}
imshow("detections", original);
//moveWindow("detections", 100, 100);
//waitKey(0);
}
else if (mode == "--CPU")
{
cv::Mat inputCPU(work_img);
vector<Rect> facesCPU;
Size minsize = cascade->getClassifierSize();
cascadeCPU.detectMultiScale(inputCPU, facesCPU, 1.1,
(filterRects || findLargestObject) ? 4 : 0,
(findLargestObject ? CASCADE_FIND_BIGGEST_OBJECT : 0)
| CASCADE_SCALE_IMAGE,
minsize);
for(int i=0;i<(int)facesCPU.size();++i)
{
rectangle(work_img, facesCPU[i], Scalar(0, 255, 0), 3);
}
imshow("detections2", copy);
moveWindow("detections2", 100, 100);
//waitKey(0);
}
}
int main(int argc, char* argv[])
{
const char *images[9];
images[0] = "senthil100.jpg";
images[1] = "senthil500.jpg";
images[2] = "senthil1000.jpg";
images[3] = "Senthil2000.jpg";
images[4] = "Senthil3000.jpg";
images[5] = "Senthil4000.jpg";
images[6] = "Senthil5000.jpg";
images[7] = "Senthil6000.jpg";
images[8] = "Senthil7500.jpg";
for (int i = 0; i < 9; i++)
{
hipError_t error;
hipEvent_t start1;
error = hipEventCreate(&start1);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop1;
error = hipEventCreate(&stop1);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventRecord(start1, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
recog(string(images[i]), string(argv[1]));
error = hipEventRecord(stop1, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventSynchronize(stop1);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal1 = 0.0f;
error = hipEventElapsedTime(&msecTotal1, start1, stop1);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("run1: %.3f msec \n", msecTotal1);
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
recog(string(images[i]), string(argv[1]));
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("run2: %.3f msec \n", msecTotal);
//double speedup = runtimeC / runtimeG;
//printf("========speedup========= \n");
//printf("speedup: %f \n", speedup);
}
return 0;
}
|
e95311dd065473e3cc065af9464213f53bb0334a.cu
|
#include <opencv2/opencv.hpp>
#include <opencv2/cudaobjdetect.hpp>
#include <iostream>
#include <sys/time.h>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/cudaimgproc.hpp"
#include "opencv2/cudawarping.hpp"
#include <iomanip>
#include <cuda_runtime.h>
using namespace std;
using namespace cv;
//global call of cascade
void recog(string imageName, string mode)
{
//string cascadeName = "/home/ubuntu/Downloads/opencv/data/haarcascades_cuda/haarcascade_frontalface_alt2.xml";
string cascadeName = "haarcascade_frontalface_alt2.xml";
Ptr<cuda::CascadeClassifier> cascade = cuda::CascadeClassifier::create(cascadeName);
cv::CascadeClassifier cascadeCPU;
cascadeCPU.load(cascadeName);
if (cascade.empty() || cascadeCPU.empty()){
cerr << "Could not load model!" << endl;
// return -1;
}
Mat original = imread(imageName);
Mat gray = imread(imageName, CV_LOAD_IMAGE_GRAYSCALE);
Mat work_img = original.clone();
Mat copy = original.clone();
//pre processing
cvtColor(original, work_img, COLOR_RGB2GRAY);
equalizeHist(work_img, work_img);
bool findLargestObject = false;
bool filterRects = true;
if (mode == "--GPU")
{
cuda::GpuMat inputGPU(work_img);
cuda::GpuMat faces;
faces.create(1, 10000, cv::DataType<cv::Rect>::type); ///
cascade->setFindLargestObject(findLargestObject);
cascade->setScaleFactor(1.2);
cascade->setMinNeighbors((filterRects || findLargestObject) ? 4 : 0);
cascade->detectMultiScale(inputGPU, faces);
vector<Rect> objects;
cascade->convert(faces, objects);
for(int i=0;i<(int)objects.size();++i)
{
rectangle(original, objects[i], Scalar(0, 255, 0), 3);
}
imshow("detections", original);
//moveWindow("detections", 100, 100);
//waitKey(0);
}
else if (mode == "--CPU")
{
cv::Mat inputCPU(work_img);
vector<Rect> facesCPU;
Size minsize = cascade->getClassifierSize();
cascadeCPU.detectMultiScale(inputCPU, facesCPU, 1.1,
(filterRects || findLargestObject) ? 4 : 0,
(findLargestObject ? CASCADE_FIND_BIGGEST_OBJECT : 0)
| CASCADE_SCALE_IMAGE,
minsize);
for(int i=0;i<(int)facesCPU.size();++i)
{
rectangle(work_img, facesCPU[i], Scalar(0, 255, 0), 3);
}
imshow("detections2", copy);
moveWindow("detections2", 100, 100);
//waitKey(0);
}
}
int main(int argc, char* argv[])
{
const char *images[9];
images[0] = "senthil100.jpg";
images[1] = "senthil500.jpg";
images[2] = "senthil1000.jpg";
images[3] = "Senthil2000.jpg";
images[4] = "Senthil3000.jpg";
images[5] = "Senthil4000.jpg";
images[6] = "Senthil5000.jpg";
images[7] = "Senthil6000.jpg";
images[8] = "Senthil7500.jpg";
for (int i = 0; i < 9; i++)
{
cudaError_t error;
cudaEvent_t start1;
error = cudaEventCreate(&start1);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop1;
error = cudaEventCreate(&stop1);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventRecord(start1, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
recog(string(images[i]), string(argv[1]));
error = cudaEventRecord(stop1, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventSynchronize(stop1);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal1 = 0.0f;
error = cudaEventElapsedTime(&msecTotal1, start1, stop1);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("run1: %.3f msec \n", msecTotal1);
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
recog(string(images[i]), string(argv[1]));
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("run2: %.3f msec \n", msecTotal);
//double speedup = runtimeC / runtimeG;
//printf("========speedup========= \n");
//printf("speedup: %f \n", speedup);
}
return 0;
}
|
e7b83a35b23ab189582c25557334aff204e8c8e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ char x = 0;
__global__ void racey_kernel() {
#ifdef WW
x = threadIdx.x;
#elif RW
volatile char c = x;
x = c + 1;
#endif
}
int main() {
// sanity check test, would have been too messy to shoehorn into two_streams.cu
hipLaunchKernelGGL(( racey_kernel), dim3(1),dim3(1), 0, 0, );
hipLaunchKernelGGL(( racey_kernel), dim3(1),dim3(1), 0, 0, );
hipDeviceReset();
return 0;
}
|
e7b83a35b23ab189582c25557334aff204e8c8e5.cu
|
#include <stdio.h>
__device__ char x = 0;
__global__ void racey_kernel() {
#ifdef WW
x = threadIdx.x;
#elif RW
volatile char c = x;
x = c + 1;
#endif
}
int main() {
// sanity check test, would have been too messy to shoehorn into two_streams.cu
racey_kernel<<<1,1>>>();
racey_kernel<<<1,1>>>();
cudaDeviceReset();
return 0;
}
|
833a996e0fe4582acf0466eadf2738e0c9dc45c3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "Angles.cuh"
#include "CubicInterp.cuh"
#include "DeviceFunctions.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include "Transformation.cuh"
namespace gtom
{
#define SincWindow 16
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template<bool cubicinterp, bool zerocentered> __global__ void GetFFTPlaneKernel(cudaTex t_volumeRe, cudaTex t_volumeIm, cudaTex t_volumepsf, int dim, uint dimft, uint n, tcomplex* d_imageft, tfloat* d_imagepsf, glm::mat2x3* d_rotations, tfloat2* d_shifts);
template<bool zerocentered> __global__ void GetFFTPlaneSincKernel(tcomplex* d_volumeft, tfloat* d_volumepsf, int dim, uint dimft, uint elementsvolume, tcomplex* d_imageft, tfloat* d_imagepsf, glm::mat2x3* d_rotations, tfloat2* d_shifts);
__global__ void IntersectionKernel(float* d_distmin, float* d_distmax, int2 dims, tfloat3 boxmin, tfloat3 boxmax, glm::vec3 invdirection, char3 signs, glm::mat4 transform);
template <bool cubicinterp> __global__ void RaytraceVolumeKernel(cudaTex t_volume, int3 dimsvolume, tfloat* d_projection, int2 dimsimage, float* d_distmin, float* d_distmax, glm::vec3 direction, glm::mat4 transform);
/////////////////////////////////////////
//Equivalent of TOM's tom_proj3d method//
/////////////////////////////////////////
void d_ProjForward(tcomplex* d_volumeft, tfloat* d_volumepsf, int3 dimsvolume, tcomplex* d_projectionsft, tfloat* d_projectionspsf, tfloat3* h_angles, tfloat2* h_shifts, T_INTERP_MODE mode, bool outputzerocentered, int batch)
{
int2 dimsimage = toInt2(dimsvolume.x, dimsvolume.x);
int3 dimsvolumeft = toInt3(dimsvolume.x / 2 + 1, dimsvolume.y, dimsvolume.z);
// Calculate rotation matrices and shifts
glm::mat2x3* h_matrices = (glm::mat2x3*)malloc(batch * sizeof(glm::mat2x3));
tfloat2* h_normshifts = (tfloat2*)malloc(batch * sizeof(tfloat2));
for (int i = 0; i < batch; i++)
{
glm::mat3 r = Matrix3Euler(h_angles[i]);
h_matrices[i] = glm::mat2x3(r[0][0], r[0][1], r[0][2], r[1][0], r[1][1], r[1][2]);
h_normshifts[i] = tfloat2(-h_shifts[i].x / (tfloat)dimsimage.x, -h_shifts[i].y / (tfloat)dimsimage.y);
}
glm::mat2x3* d_matrices = (glm::mat2x3*)CudaMallocFromHostArray(h_matrices, batch * sizeof(glm::mat2x3));
tfloat2* d_normshifts = (tfloat2*)CudaMallocFromHostArray(h_normshifts, batch * sizeof(tfloat2));
free(h_normshifts);
free(h_matrices);
if (mode == T_INTERP_LINEAR || mode == T_INTERP_CUBIC)
{
// Prefilter and bind 3D textures
tfloat* d_tempRe, *d_tempIm;
hipMalloc((void**)&d_tempRe, ElementsFFT(dimsvolume) * sizeof(tfloat));
hipMalloc((void**)&d_tempIm, ElementsFFT(dimsvolume) * sizeof(tfloat));
d_ConvertTComplexToSplitComplex(d_volumeft, d_tempRe, d_tempIm, ElementsFFT(dimsvolume));
if (mode == T_INTERP_CUBIC)
{
d_CubicBSplinePrefilter3D(d_tempRe, dimsvolumeft);
d_CubicBSplinePrefilter3D(d_tempIm, dimsvolumeft);
}
hipArray_t a_volumeRe = 0, a_volumeIm = 0;
cudaTex t_volumeRe = 0, t_volumeIm = 0;
d_BindTextureTo3DArray(d_tempRe, a_volumeRe, t_volumeRe, dimsvolumeft, hipFilterModeLinear, false);
d_BindTextureTo3DArray(d_tempIm, a_volumeIm, t_volumeIm, dimsvolumeft, hipFilterModeLinear, false);
hipMemcpy(d_tempRe, d_volumepsf, ElementsFFT(dimsvolume) * sizeof(tfloat), hipMemcpyDeviceToDevice);
if (mode == T_INTERP_CUBIC)
d_CubicBSplinePrefilter3D(d_tempRe, dimsvolumeft);
hipArray_t a_volumepsf = 0;
cudaTex t_volumepsf = 0;
d_BindTextureTo3DArray(d_tempRe, a_volumepsf, t_volumepsf, dimsvolumeft, hipFilterModeLinear, false);
// Sample the planes
uint TpB = tmin(128, NextMultipleOf(ElementsFFT2(dimsimage), 32));
dim3 grid = dim3((ElementsFFT2(dimsimage) + TpB - 1) / TpB, batch);
if (mode == T_INTERP_CUBIC)
if (outputzerocentered)
GetFFTPlaneKernel<true, true> << <grid, TpB >> > (t_volumeRe, t_volumeIm, t_volumepsf, dimsimage.x, dimsimage.x / 2 + 1, ElementsFFT2(dimsimage), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
else
GetFFTPlaneKernel<true, false> << <grid, TpB >> > (t_volumeRe, t_volumeIm, t_volumepsf, dimsimage.x, dimsimage.x / 2 + 1, ElementsFFT2(dimsimage), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
else
if (outputzerocentered)
GetFFTPlaneKernel<false, true> << <grid, TpB >> > (t_volumeRe, t_volumeIm, t_volumepsf, dimsimage.x, dimsimage.x / 2 + 1, ElementsFFT2(dimsimage), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
else
GetFFTPlaneKernel<false, false> << <grid, TpB >> > (t_volumeRe, t_volumeIm, t_volumepsf, dimsimage.x, dimsimage.x / 2 + 1, ElementsFFT2(dimsimage), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
// Tear down
hipDestroyTextureObject(t_volumeIm);
hipFreeArray(a_volumeIm);
hipDestroyTextureObject(t_volumeRe);
hipFreeArray(a_volumeRe);
hipDestroyTextureObject(t_volumepsf);
hipFreeArray(a_volumepsf);
hipFree(d_tempRe);
hipFree(d_tempIm);
}
else if (mode == T_INTERP_SINC)
{
uint TpB = 192;
dim3 grid = dim3((ElementsFFT2(dimsimage) + TpB - 1) / TpB, batch);
if (outputzerocentered)
GetFFTPlaneSincKernel<true> << <grid, TpB >> > (d_volumeft, d_volumepsf, dimsvolume.x, dimsvolume.x / 2 + 1, ElementsFFT(dimsvolume), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
else
GetFFTPlaneSincKernel<false> << <grid, TpB >> > (d_volumeft, d_volumepsf, dimsvolume.x, dimsvolume.x / 2 + 1, ElementsFFT(dimsvolume), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
}
hipFree(d_normshifts);
hipFree(d_matrices);
}
void d_ProjForward(tfloat* d_volume, tfloat* d_volumepsf, int3 dimsvolume, tfloat* d_projections, tfloat* d_projectionspsf, tfloat3* h_angles, tfloat2* h_shifts, T_INTERP_MODE mode, int batch)
{
int3 dimsimage = toInt3(dimsvolume.x, dimsvolume.y, 1);
// Alloc buffers for FFTed volume and projections
tcomplex* d_volumeft;
hipMalloc((void**)&d_volumeft, ElementsFFT(dimsvolume) * sizeof(tcomplex));
tcomplex* d_projft;
hipMalloc((void**)&d_projft, ElementsFFT2(dimsimage) * batch * sizeof(tcomplex));
d_FFTR2C(d_volume, d_volumeft, 3, dimsvolume);
d_RemapHalfFFT2Half(d_volumeft, d_volumeft, dimsvolume);
// Sample planes and transform back into real space
d_ProjForward(d_volumeft, d_volumepsf, dimsvolume, d_projft, d_projectionspsf, h_angles, h_shifts, mode, false, batch);
d_IFFTC2R(d_projft, d_projections, 2, dimsimage, batch);
// Tear down
hipFree(d_projft);
hipFree(d_volumeft);
}
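// Minimal usage sketch for the real-space d_ProjForward overload above (assumptions:
// a 64^3 device volume with matching PSF, one projection, no shift; buffer names are
// illustrative only):
//   int3 dims = toInt3(64, 64, 64);
//   tfloat3 angle = tfloat3((tfloat)0.1, (tfloat)0.2, (tfloat)0.0);
//   tfloat2 shift = tfloat2(0, 0);
//   d_ProjForward(d_volume, d_volumepsf, dims, d_proj, d_projpsf, &angle, &shift, T_INTERP_CUBIC, 1);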
void d_ProjForwardRaytrace(tfloat* d_volume, int3 dimsvolume, tfloat3 volumeoffset, tfloat* d_projections, int2 dimsproj, tfloat3* h_angles, tfloat2* h_offsets, tfloat2* h_scales, T_INTERP_MODE mode, int supersample, int batch)
{
dimsproj = toInt2(dimsproj.x * supersample, dimsproj.y * supersample);
dimsvolume = toInt3(dimsvolume.x * supersample, dimsvolume.y * supersample, dimsvolume.z * supersample);
tfloat* d_superproj, *d_supervolume;
if (supersample > 1)
{
hipMalloc((void**)&d_superproj, Elements2(dimsproj) * batch * sizeof(tfloat));
hipMalloc((void**)&d_supervolume, Elements(dimsvolume) * sizeof(tfloat));
d_Scale(d_volume, d_supervolume, toInt3(dimsvolume.x / supersample, dimsvolume.y / supersample, dimsvolume.z / supersample), dimsvolume, T_INTERP_FOURIER);
}
else
{
d_superproj = d_projections;
d_supervolume = d_volume;
}
tfloat* d_prefilteredvolume;
if (mode == T_INTERP_CUBIC)
hipMalloc((void**)&d_prefilteredvolume, Elements(dimsvolume) * sizeof(tfloat));
float* d_distmin, *d_distmax;
hipMalloc((void**)&d_distmin, Elements2(dimsproj) * batch * sizeof(float));
hipMalloc((void**)&d_distmax, Elements2(dimsproj) * batch * sizeof(float));
glm::mat4* h_raytransforms = (glm::mat4*)malloc(batch * sizeof(glm::mat4));
for (int n = 0; n < batch; n++)
h_raytransforms[n] = Matrix4Translation(tfloat3(dimsvolume.x / 2 + 0.5f, dimsvolume.y / 2 + 0.5f, dimsvolume.z / 2 + 0.5f)) *
Matrix4Translation(tfloat3(-volumeoffset.x, -volumeoffset.y, -volumeoffset.z)) *
Matrix4Euler(h_angles[n]) *
Matrix4Translation(tfloat3(h_offsets[n].x * supersample, h_offsets[n].y * supersample, 0.0f)) *
Matrix4Scale(tfloat3(h_scales[n].x, h_scales[n].y, 1.0f)) *
Matrix4Translation(tfloat3(-dimsproj.x / 2, -dimsproj.y / 2, 0));
tfloat3 boxmin = tfloat3(0, 0, 0);
tfloat3 boxmax = tfloat3(dimsvolume.x,
dimsvolume.y,
dimsvolume.z);
for (int n = 0; n < batch; n++)
{
int TpB = min(NextMultipleOf(dimsproj.x, 32), 256);
dim3 grid = dim3((dimsproj.x + TpB - 1) / TpB, dimsproj.y);
glm::vec3 direction = Matrix3Euler(h_angles[n]) * glm::vec3(0.0f, 0.0f, -1.0f);
glm::vec3 invdirection = glm::vec3(1.0f / direction.x, 1.0f / direction.y, 1.0f / direction.z);
char3 signs = make_char3(invdirection.x < 0.0f ? 1 : 0, invdirection.y < 0.0f ? 1 : 0, invdirection.z < 0.0f ? 1 : 0);
IntersectionKernel << <grid, TpB >> > (d_distmin + Elements2(dimsproj) * n, d_distmax + Elements2(dimsproj) * n, dimsproj, boxmin, boxmax, invdirection, signs, h_raytransforms[n]);
}
hipArray* a_volume;
cudaTex t_volume;
if (mode == T_INTERP_CUBIC)
{
hipMemcpy(d_prefilteredvolume, d_supervolume, Elements(dimsvolume) * sizeof(tfloat), hipMemcpyDeviceToDevice);
d_CubicBSplinePrefilter3D(d_prefilteredvolume, dimsvolume);
d_BindTextureTo3DArray(d_prefilteredvolume, a_volume, t_volume, dimsvolume, hipFilterModeLinear, false);
}
else
{
d_BindTextureTo3DArray(d_supervolume, a_volume, t_volume, dimsvolume, hipFilterModeLinear, false);
}
dim3 TpB = dim3(16, 16);
dim3 grid = dim3((dimsproj.x + 15) / 16, (dimsproj.y + 15) / 16);
for (int n = 0; n < batch; n++)
{
glm::vec3 direction = Matrix3Euler(h_angles[n]) * glm::vec3(0.0f, 0.0f, -1.0f);
if (mode == T_INTERP_CUBIC)
RaytraceVolumeKernel<true> << <grid, TpB >> > (t_volume,
dimsvolume,
d_superproj + Elements2(dimsproj) * n,
dimsproj,
d_distmin + Elements2(dimsproj) * n,
d_distmax + Elements2(dimsproj) * n,
direction,
h_raytransforms[n]);
else
RaytraceVolumeKernel<false> << <grid, TpB >> > (t_volume,
dimsvolume,
d_superproj + Elements2(dimsproj) * n,
dimsproj,
d_distmin + Elements2(dimsproj) * n,
d_distmax + Elements2(dimsproj) * n,
direction,
h_raytransforms[n]);
}
hipDestroyTextureObject(t_volume);
hipFreeArray(a_volume);
if (supersample > 1)
{
d_Scale(d_superproj, d_projections, toInt3(dimsproj), toInt3(dimsproj.x / supersample, dimsproj.y / supersample, 1), T_INTERP_FOURIER);
}
free(h_raytransforms);
hipFree(d_distmax);
hipFree(d_distmin);
if (mode == T_INTERP_CUBIC)
hipFree(d_prefilteredvolume);
if (supersample > 1)
{
hipFree(d_supervolume);
hipFree(d_superproj);
}
}
////////////////
//CUDA kernels//
////////////////
template<bool cubicinterp, bool zerocentered> __global__ void GetFFTPlaneKernel(cudaTex t_volumeRe, cudaTex t_volumeIm, cudaTex t_volumepsf, int dim, uint dimft, uint n, tcomplex* d_imageft, tfloat* d_imagepsf, glm::mat2x3* d_rotations, tfloat2* d_shifts)
{
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n)
return;
int idx = id % dimft;
int idy = id / dimft;
d_imageft += blockIdx.y * n;
d_imagepsf += blockIdx.y * n;
int x, y;
if (zerocentered)
{
x = idx;
y = idy;
}
else
{
x = dim / 2 - idx;
y = dim - 1 - ((idy + dim / 2 - 1) % dim);
}
glm::vec2 poslocal = glm::vec2(x, y);
poslocal -= (float)(dim / 2);
if (poslocal.x * poslocal.x + poslocal.y * poslocal.y >= (float)(dim * dim / 4))
{
d_imageft[id] = make_cuComplex(0, 0);
d_imagepsf[id] = 0;
return;
}
glm::vec3 posglobal = d_rotations[blockIdx.y] * poslocal;
bool flip = false;
if (posglobal.x > 0)
{
posglobal = -posglobal;
flip = true;
}
posglobal += (float)(dimft - 1) + 0.5f;
tfloat valRe = 0, valIm = 0, valpsf = 0;
if (cubicinterp)
{
valRe = cubicTex3D(t_volumeRe, posglobal.x, posglobal.y, posglobal.z);
valIm = cubicTex3D(t_volumeIm, posglobal.x, posglobal.y, posglobal.z);
valpsf = cubicTex3D(t_volumepsf, posglobal.x, posglobal.y, posglobal.z);
}
else
{
valRe = tex3D<tfloat>(t_volumeRe, posglobal.x, posglobal.y, posglobal.z);
valIm = tex3D<tfloat>(t_volumeIm, posglobal.x, posglobal.y, posglobal.z);
valpsf = tex3D<tfloat>(t_volumepsf, posglobal.x, posglobal.y, posglobal.z);
}
if (flip)
valIm = -valIm;
tfloat2 delta = d_shifts[blockIdx.y];
tfloat factor = (delta.x * poslocal.x + delta.y * poslocal.y) * PI2;
d_imageft[id] = cmul(make_cuComplex(valRe, valIm), make_cuComplex(cos(factor), sin(factor)));
d_imagepsf[id] = valpsf;
}
template<bool zerocentered> __global__ void GetFFTPlaneSincKernel(tcomplex* d_volumeft, tfloat* d_volumepsf, int dim, uint dimft, uint elementsvolume, tcomplex* d_imageft, tfloat* d_imagepsf, glm::mat2x3* d_rotations, tfloat2* d_shifts)
{
__shared__ tfloat s_volumeftRe[192], s_volumeftIm[192];
__shared__ tfloat s_volumeweights[192];
uint id = blockIdx.x * blockDim.x + threadIdx.x;
d_imageft += blockIdx.y * dimft * dim;
d_imagepsf += blockIdx.y * dimft * dim;
uint x, y;
if (zerocentered)
{
x = id % dimft;
y = id / dimft;
}
else
{
x = dim / 2 - id % dimft;
y = dim - 1 - ((id / dimft + dim / 2 - 1) % dim);
}
tcomplex sumft = make_cuComplex(0, 0);
tfloat sumweight = 0;
float center = dim / 2;
glm::vec2 pos = glm::vec2(x, y);
pos -= center;
glm::vec3 posglobal = d_rotations[blockIdx.y] * pos;
for (uint poffset = 0; poffset < elementsvolume; poffset += 192)
{
{
uint pglobal = poffset + threadIdx.x;
if (pglobal < elementsvolume)
{
s_volumeftRe[threadIdx.x] = d_volumeft[pglobal].x;
s_volumeftIm[threadIdx.x] = d_volumeft[pglobal].y;
s_volumeweights[threadIdx.x] = d_volumepsf[pglobal];
}
}
__syncthreads();
uint plimit = min(192, elementsvolume - poffset);
for (uint p = 0; p < plimit; p++)
{
uint pglobal = poffset + p;
uint px = pglobal % dimft;
uint py = (pglobal / dimft) % dim;
uint pz = pglobal / (dimft * dim);
glm::vec3 pospixel = glm::vec3(px, py, pz) - center;
if (pospixel.x * pospixel.x + pospixel.y * pospixel.y + pospixel.z * pospixel.z < dim * dim * 4)
{
float s = sinc(pospixel.x - posglobal.x) * sinc(pospixel.y - posglobal.y) * sinc(pospixel.z - posglobal.z);
tcomplex val = make_cuComplex(s_volumeftRe[p], s_volumeftIm[p]);
tfloat valweight = s_volumeweights[p];
sumft += val * s;
sumweight += valweight * s;
if (px == dim / 2)
continue;
s = sinc(-pospixel.x - posglobal.x) * sinc(-pospixel.y - posglobal.y) * sinc(-pospixel.z - posglobal.z);
val.y = -val.y;
sumft += val * s;
sumweight += valweight * s;
}
}
__syncthreads();
}
if (id >= dimft * dim)
return;
float falloff = pos.x * pos.x + pos.y * pos.y >= (float)(dim * dim / 4) ? 0.0f : 1.0f;
sumft *= falloff;
sumweight *= falloff;
tfloat2 delta = d_shifts[blockIdx.y];
tfloat factor = (delta.x * pos.x + delta.y * pos.y) * PI2;
d_imageft[id] = cmul(sumft, make_cuComplex(cos(factor), sin(factor)));
d_imagepsf[id] = sumweight;
}
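// Ray/box intersection using the classic slab method: for each projection pixel the
// entry (d_distmin) and exit (d_distmax) distances of its ray through the volume's
// bounding box are computed from the precomputed inverse direction and sign flags;
// rays that miss the box get a zero-length interval.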
__global__ void IntersectionKernel(float* d_distmin, float* d_distmax, int2 dims, tfloat3 boxmin, tfloat3 boxmax, glm::vec3 invdirection, char3 signs, glm::mat4 transform)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dims.x)
return;
int idy = blockIdx.y;
d_distmin += idy * dims.x + idx;
d_distmax += idy * dims.x + idx;
glm::vec4 origin = transform * glm::vec4((float)idx, (float)idy, 9999.0f, 1.0f);
float tmin, tmax, tymin, tymax, tzmin, tzmax;
tmin = ((signs.x ? boxmax.x : boxmin.x) - origin.x) * invdirection.x;
tmax = ((signs.x ? boxmin.x : boxmax.x) - origin.x) * invdirection.x;
tymin = ((signs.y ? boxmax.y : boxmin.y) - origin.y) * invdirection.y;
tymax = ((signs.y ? boxmin.y : boxmax.y) - origin.y) * invdirection.y;
if ((tmin > tymax) || (tymin > tmax))
{
*d_distmin = 0.0f;
*d_distmax = 0.0f;
return;
}
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = ((signs.z ? boxmax.z : boxmin.z) - origin.z) * invdirection.z;
tzmax = ((signs.z ? boxmin.z : boxmax.z) - origin.z) * invdirection.z;
if ((tmin > tzmax) || (tzmin > tmax))
{
*d_distmin = 0.0f;
*d_distmax = 0.0f;
return;
}
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
if (!isnan(tmin) && !isnan(tmax))
{
*d_distmin = tmin;
*d_distmax = tmax;
}
else
{
*d_distmin = 0.0f;
*d_distmax = 0.0f;
}
}
template <bool cubicinterp> __global__ void RaytraceVolumeKernel(cudaTex t_volume, int3 dimsvolume, tfloat* d_projection, int2 dimsimage, float* d_distmin, float* d_distmax, glm::vec3 direction, glm::mat4 transform)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dimsimage.x)
return;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dimsimage.y)
return;
float distmin = d_distmin[idy * dimsimage.x + idx];
float distmax = d_distmax[idy * dimsimage.x + idx];
d_projection += idy * dimsimage.x + idx;
float pathlength = distmax - distmin;
ushort steps = ceil(pathlength * 5.0f);
double sum = 0.0;
if (steps > 0)
{
float steplength = pathlength / (float)steps;
glm::vec4 origin4 = transform * glm::vec4((float)idx, (float)idy, 9999.0f, 1.0f);
glm::vec3 origin = glm::vec3(origin4.x, origin4.y, origin4.z);
distmin += steplength / 2.0f;
for (ushort i = 0; i < steps; i++)
{
glm::vec3 point = (distmin + (float)i * steplength) * direction + origin;
if (cubicinterp)
sum += cubicTex3D(t_volume, point.x, point.y, point.z) * steplength;
else
sum += tex3D<tfloat>(t_volume, point.x, point.y, point.z) * steplength;
}
}
*d_projection = sum;
}
}
|
833a996e0fe4582acf0466eadf2738e0c9dc45c3.cu
|
#include "Prerequisites.cuh"
#include "Angles.cuh"
#include "CubicInterp.cuh"
#include "DeviceFunctions.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include "Transformation.cuh"
namespace gtom
{
#define SincWindow 16
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
template<bool cubicinterp, bool zerocentered> __global__ void GetFFTPlaneKernel(cudaTex t_volumeRe, cudaTex t_volumeIm, cudaTex t_volumepsf, int dim, uint dimft, uint n, tcomplex* d_imageft, tfloat* d_imagepsf, glm::mat2x3* d_rotations, tfloat2* d_shifts);
template<bool zerocentered> __global__ void GetFFTPlaneSincKernel(tcomplex* d_volumeft, tfloat* d_volumepsf, int dim, uint dimft, uint elementsvolume, tcomplex* d_imageft, tfloat* d_imagepsf, glm::mat2x3* d_rotations, tfloat2* d_shifts);
__global__ void IntersectionKernel(float* d_distmin, float* d_distmax, int2 dims, tfloat3 boxmin, tfloat3 boxmax, glm::vec3 invdirection, char3 signs, glm::mat4 transform);
template <bool cubicinterp> __global__ void RaytraceVolumeKernel(cudaTex t_volume, int3 dimsvolume, tfloat* d_projection, int2 dimsimage, float* d_distmin, float* d_distmax, glm::vec3 direction, glm::mat4 transform);
/////////////////////////////////////////
//Equivalent of TOM's tom_proj3d method//
/////////////////////////////////////////
void d_ProjForward(tcomplex* d_volumeft, tfloat* d_volumepsf, int3 dimsvolume, tcomplex* d_projectionsft, tfloat* d_projectionspsf, tfloat3* h_angles, tfloat2* h_shifts, T_INTERP_MODE mode, bool outputzerocentered, int batch)
{
int2 dimsimage = toInt2(dimsvolume.x, dimsvolume.x);
int3 dimsvolumeft = toInt3(dimsvolume.x / 2 + 1, dimsvolume.y, dimsvolume.z);
// Calculate rotation matrices and shifts
glm::mat2x3* h_matrices = (glm::mat2x3*)malloc(batch * sizeof(glm::mat2x3));
tfloat2* h_normshifts = (tfloat2*)malloc(batch * sizeof(tfloat2));
for (int i = 0; i < batch; i++)
{
glm::mat3 r = Matrix3Euler(h_angles[i]);
h_matrices[i] = glm::mat2x3(r[0][0], r[0][1], r[0][2], r[1][0], r[1][1], r[1][2]);
h_normshifts[i] = tfloat2(-h_shifts[i].x / (tfloat)dimsimage.x, -h_shifts[i].y / (tfloat)dimsimage.y);
}
glm::mat2x3* d_matrices = (glm::mat2x3*)CudaMallocFromHostArray(h_matrices, batch * sizeof(glm::mat2x3));
tfloat2* d_normshifts = (tfloat2*)CudaMallocFromHostArray(h_normshifts, batch * sizeof(tfloat2));
free(h_normshifts);
free(h_matrices);
if (mode == T_INTERP_LINEAR || mode == T_INTERP_CUBIC)
{
// Prefilter and bind 3D textures
tfloat* d_tempRe, *d_tempIm;
cudaMalloc((void**)&d_tempRe, ElementsFFT(dimsvolume) * sizeof(tfloat));
cudaMalloc((void**)&d_tempIm, ElementsFFT(dimsvolume) * sizeof(tfloat));
d_ConvertTComplexToSplitComplex(d_volumeft, d_tempRe, d_tempIm, ElementsFFT(dimsvolume));
if (mode == T_INTERP_CUBIC)
{
d_CubicBSplinePrefilter3D(d_tempRe, dimsvolumeft);
d_CubicBSplinePrefilter3D(d_tempIm, dimsvolumeft);
}
cudaArray_t a_volumeRe = 0, a_volumeIm = 0;
cudaTex t_volumeRe = 0, t_volumeIm = 0;
d_BindTextureTo3DArray(d_tempRe, a_volumeRe, t_volumeRe, dimsvolumeft, cudaFilterModeLinear, false);
d_BindTextureTo3DArray(d_tempIm, a_volumeIm, t_volumeIm, dimsvolumeft, cudaFilterModeLinear, false);
cudaMemcpy(d_tempRe, d_volumepsf, ElementsFFT(dimsvolume) * sizeof(tfloat), cudaMemcpyDeviceToDevice);
if (mode == T_INTERP_CUBIC)
d_CubicBSplinePrefilter3D(d_tempRe, dimsvolumeft);
cudaArray_t a_volumepsf = 0;
cudaTex t_volumepsf = 0;
d_BindTextureTo3DArray(d_tempRe, a_volumepsf, t_volumepsf, dimsvolumeft, cudaFilterModeLinear, false);
// Sample the planes
uint TpB = tmin(128, NextMultipleOf(ElementsFFT2(dimsimage), 32));
dim3 grid = dim3((ElementsFFT2(dimsimage) + TpB - 1) / TpB, batch);
if (mode == T_INTERP_CUBIC)
if (outputzerocentered)
GetFFTPlaneKernel<true, true> << <grid, TpB >> > (t_volumeRe, t_volumeIm, t_volumepsf, dimsimage.x, dimsimage.x / 2 + 1, ElementsFFT2(dimsimage), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
else
GetFFTPlaneKernel<true, false> << <grid, TpB >> > (t_volumeRe, t_volumeIm, t_volumepsf, dimsimage.x, dimsimage.x / 2 + 1, ElementsFFT2(dimsimage), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
else
if (outputzerocentered)
GetFFTPlaneKernel<false, true> << <grid, TpB >> > (t_volumeRe, t_volumeIm, t_volumepsf, dimsimage.x, dimsimage.x / 2 + 1, ElementsFFT2(dimsimage), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
else
GetFFTPlaneKernel<false, false> << <grid, TpB >> > (t_volumeRe, t_volumeIm, t_volumepsf, dimsimage.x, dimsimage.x / 2 + 1, ElementsFFT2(dimsimage), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
// Tear down
cudaDestroyTextureObject(t_volumeIm);
cudaFreeArray(a_volumeIm);
cudaDestroyTextureObject(t_volumeRe);
cudaFreeArray(a_volumeRe);
cudaDestroyTextureObject(t_volumepsf);
cudaFreeArray(a_volumepsf);
cudaFree(d_tempRe);
cudaFree(d_tempIm);
}
else if (mode == T_INTERP_SINC)
{
uint TpB = 192;
dim3 grid = dim3((ElementsFFT2(dimsimage) + TpB - 1) / TpB, batch);
if (outputzerocentered)
GetFFTPlaneSincKernel<true> << <grid, TpB >> > (d_volumeft, d_volumepsf, dimsvolume.x, dimsvolume.x / 2 + 1, ElementsFFT(dimsvolume), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
else
GetFFTPlaneSincKernel<false> << <grid, TpB >> > (d_volumeft, d_volumepsf, dimsvolume.x, dimsvolume.x / 2 + 1, ElementsFFT(dimsvolume), d_projectionsft, d_projectionspsf, d_matrices, d_normshifts);
}
cudaFree(d_normshifts);
cudaFree(d_matrices);
}
void d_ProjForward(tfloat* d_volume, tfloat* d_volumepsf, int3 dimsvolume, tfloat* d_projections, tfloat* d_projectionspsf, tfloat3* h_angles, tfloat2* h_shifts, T_INTERP_MODE mode, int batch)
{
int3 dimsimage = toInt3(dimsvolume.x, dimsvolume.y, 1);
// Alloc buffers for FFTed volume and projections
tcomplex* d_volumeft;
cudaMalloc((void**)&d_volumeft, ElementsFFT(dimsvolume) * sizeof(tcomplex));
tcomplex* d_projft;
cudaMalloc((void**)&d_projft, ElementsFFT2(dimsimage) * batch * sizeof(tcomplex));
d_FFTR2C(d_volume, d_volumeft, 3, dimsvolume);
d_RemapHalfFFT2Half(d_volumeft, d_volumeft, dimsvolume);
// Sample planes and transform back into real space
d_ProjForward(d_volumeft, d_volumepsf, dimsvolume, d_projft, d_projectionspsf, h_angles, h_shifts, mode, false, batch);
d_IFFTC2R(d_projft, d_projections, 2, dimsimage, batch);
// Tear down
cudaFree(d_projft);
cudaFree(d_volumeft);
}
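// Real-space alternative to the Fourier-slice projector above: the volume and the
// projections are optionally supersampled, per-pixel entry/exit distances through the
// volume box are computed by IntersectionKernel, and RaytraceVolumeKernel then
// integrates the volume along each ray (roughly 5 samples per voxel of path length).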
void d_ProjForwardRaytrace(tfloat* d_volume, int3 dimsvolume, tfloat3 volumeoffset, tfloat* d_projections, int2 dimsproj, tfloat3* h_angles, tfloat2* h_offsets, tfloat2* h_scales, T_INTERP_MODE mode, int supersample, int batch)
{
dimsproj = toInt2(dimsproj.x * supersample, dimsproj.y * supersample);
dimsvolume = toInt3(dimsvolume.x * supersample, dimsvolume.y * supersample, dimsvolume.z * supersample);
tfloat* d_superproj, *d_supervolume;
if (supersample > 1)
{
cudaMalloc((void**)&d_superproj, Elements2(dimsproj) * batch * sizeof(tfloat));
cudaMalloc((void**)&d_supervolume, Elements(dimsvolume) * sizeof(tfloat));
d_Scale(d_volume, d_supervolume, toInt3(dimsvolume.x / supersample, dimsvolume.y / supersample, dimsvolume.z / supersample), dimsvolume, T_INTERP_FOURIER);
}
else
{
d_superproj = d_projections;
d_supervolume = d_volume;
}
tfloat* d_prefilteredvolume;
if (mode == T_INTERP_CUBIC)
cudaMalloc((void**)&d_prefilteredvolume, Elements(dimsvolume) * sizeof(tfloat));
float* d_distmin, *d_distmax;
cudaMalloc((void**)&d_distmin, Elements2(dimsproj) * batch * sizeof(float));
cudaMalloc((void**)&d_distmax, Elements2(dimsproj) * batch * sizeof(float));
glm::mat4* h_raytransforms = (glm::mat4*)malloc(batch * sizeof(glm::mat4));
for (int n = 0; n < batch; n++)
h_raytransforms[n] = Matrix4Translation(tfloat3(dimsvolume.x / 2 + 0.5f, dimsvolume.y / 2 + 0.5f, dimsvolume.z / 2 + 0.5f)) *
Matrix4Translation(tfloat3(-volumeoffset.x, -volumeoffset.y, -volumeoffset.z)) *
Matrix4Euler(h_angles[n]) *
Matrix4Translation(tfloat3(h_offsets[n].x * supersample, h_offsets[n].y * supersample, 0.0f)) *
Matrix4Scale(tfloat3(h_scales[n].x, h_scales[n].y, 1.0f)) *
Matrix4Translation(tfloat3(-dimsproj.x / 2, -dimsproj.y / 2, 0));
tfloat3 boxmin = tfloat3(0, 0, 0);
tfloat3 boxmax = tfloat3(dimsvolume.x,
dimsvolume.y,
dimsvolume.z);
for (int n = 0; n < batch; n++)
{
int TpB = min(NextMultipleOf(dimsproj.x, 32), 256);
dim3 grid = dim3((dimsproj.x + TpB - 1) / TpB, dimsproj.y);
glm::vec3 direction = Matrix3Euler(h_angles[n]) * glm::vec3(0.0f, 0.0f, -1.0f);
glm::vec3 invdirection = glm::vec3(1.0f / direction.x, 1.0f / direction.y, 1.0f / direction.z);
char3 signs = make_char3(invdirection.x < 0.0f ? 1 : 0, invdirection.y < 0.0f ? 1 : 0, invdirection.z < 0.0f ? 1 : 0);
IntersectionKernel << <grid, TpB >> > (d_distmin + Elements2(dimsproj) * n, d_distmax + Elements2(dimsproj) * n, dimsproj, boxmin, boxmax, invdirection, signs, h_raytransforms[n]);
}
cudaArray* a_volume;
cudaTex t_volume;
if (mode == T_INTERP_CUBIC)
{
cudaMemcpy(d_prefilteredvolume, d_supervolume, Elements(dimsvolume) * sizeof(tfloat), cudaMemcpyDeviceToDevice);
d_CubicBSplinePrefilter3D(d_prefilteredvolume, dimsvolume);
d_BindTextureTo3DArray(d_prefilteredvolume, a_volume, t_volume, dimsvolume, cudaFilterModeLinear, false);
}
else
{
d_BindTextureTo3DArray(d_supervolume, a_volume, t_volume, dimsvolume, cudaFilterModeLinear, false);
}
dim3 TpB = dim3(16, 16);
dim3 grid = dim3((dimsproj.x + 15) / 16, (dimsproj.y + 15) / 16);
for (int n = 0; n < batch; n++)
{
glm::vec3 direction = Matrix3Euler(h_angles[n]) * glm::vec3(0.0f, 0.0f, -1.0f);
if (mode == T_INTERP_CUBIC)
RaytraceVolumeKernel<true> << <grid, TpB >> > (t_volume,
dimsvolume,
d_superproj + Elements2(dimsproj) * n,
dimsproj,
d_distmin + Elements2(dimsproj) * n,
d_distmax + Elements2(dimsproj) * n,
direction,
h_raytransforms[n]);
else
RaytraceVolumeKernel<false> << <grid, TpB >> > (t_volume,
dimsvolume,
d_superproj + Elements2(dimsproj) * n,
dimsproj,
d_distmin + Elements2(dimsproj) * n,
d_distmax + Elements2(dimsproj) * n,
direction,
h_raytransforms[n]);
}
cudaDestroyTextureObject(t_volume);
cudaFreeArray(a_volume);
if (supersample > 1)
{
d_Scale(d_superproj, d_projections, toInt3(dimsproj), toInt3(dimsproj.x / supersample, dimsproj.y / supersample, 1), T_INTERP_FOURIER);
}
free(h_raytransforms);
cudaFree(d_distmax);
cudaFree(d_distmin);
if (mode == T_INTERP_CUBIC)
cudaFree(d_prefilteredvolume);
if (supersample > 1)
{
cudaFree(d_supervolume);
cudaFree(d_superproj);
}
}
////////////////
//CUDA kernels//
////////////////
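// GetFFTPlaneKernel samples one rotated central plane per projection from the split
// real/imaginary half-spectrum textures. Because only the non-redundant half of the
// FFT is stored, sample positions with positive x are mapped to their Hermitian
// mates and the imaginary part is negated (the 'flip' path below).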
template<bool cubicinterp, bool zerocentered> __global__ void GetFFTPlaneKernel(cudaTex t_volumeRe, cudaTex t_volumeIm, cudaTex t_volumepsf, int dim, uint dimft, uint n, tcomplex* d_imageft, tfloat* d_imagepsf, glm::mat2x3* d_rotations, tfloat2* d_shifts)
{
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n)
return;
int idx = id % dimft;
int idy = id / dimft;
d_imageft += blockIdx.y * n;
d_imagepsf += blockIdx.y * n;
int x, y;
if (zerocentered)
{
x = idx;
y = idy;
}
else
{
x = dim / 2 - idx;
y = dim - 1 - ((idy + dim / 2 - 1) % dim);
}
glm::vec2 poslocal = glm::vec2(x, y);
poslocal -= (float)(dim / 2);
if (poslocal.x * poslocal.x + poslocal.y * poslocal.y >= (float)(dim * dim / 4))
{
d_imageft[id] = make_cuComplex(0, 0);
d_imagepsf[id] = 0;
return;
}
glm::vec3 posglobal = d_rotations[blockIdx.y] * poslocal;
bool flip = false;
if (posglobal.x > 0)
{
posglobal = -posglobal;
flip = true;
}
posglobal += (float)(dimft - 1) + 0.5f;
tfloat valRe = 0, valIm = 0, valpsf = 0;
if (cubicinterp)
{
valRe = cubicTex3D(t_volumeRe, posglobal.x, posglobal.y, posglobal.z);
valIm = cubicTex3D(t_volumeIm, posglobal.x, posglobal.y, posglobal.z);
valpsf = cubicTex3D(t_volumepsf, posglobal.x, posglobal.y, posglobal.z);
}
else
{
valRe = tex3D<tfloat>(t_volumeRe, posglobal.x, posglobal.y, posglobal.z);
valIm = tex3D<tfloat>(t_volumeIm, posglobal.x, posglobal.y, posglobal.z);
valpsf = tex3D<tfloat>(t_volumepsf, posglobal.x, posglobal.y, posglobal.z);
}
if (flip)
valIm = -valIm;
tfloat2 delta = d_shifts[blockIdx.y];
tfloat factor = (delta.x * poslocal.x + delta.y * poslocal.y) * PI2;
d_imageft[id] = cmul(make_cuComplex(valRe, valIm), make_cuComplex(cos(factor), sin(factor)));
d_imagepsf[id] = valpsf;
}
template<bool zerocentered> __global__ void GetFFTPlaneSincKernel(tcomplex* d_volumeft, tfloat* d_volumepsf, int dim, uint dimft, uint elementsvolume, tcomplex* d_imageft, tfloat* d_imagepsf, glm::mat2x3* d_rotations, tfloat2* d_shifts)
{
__shared__ tfloat s_volumeftRe[192], s_volumeftIm[192];
__shared__ tfloat s_volumeweights[192];
uint id = blockIdx.x * blockDim.x + threadIdx.x;
d_imageft += blockIdx.y * dimft * dim;
d_imagepsf += blockIdx.y * dimft * dim;
uint x, y;
if (zerocentered)
{
x = id % dimft;
y = id / dimft;
}
else
{
x = dim / 2 - id % dimft;
y = dim - 1 - ((id / dimft + dim / 2 - 1) % dim);
}
tcomplex sumft = make_cuComplex(0, 0);
tfloat sumweight = 0;
float center = dim / 2;
glm::vec2 pos = glm::vec2(x, y);
pos -= center;
glm::vec3 posglobal = d_rotations[blockIdx.y] * pos;
for (uint poffset = 0; poffset < elementsvolume; poffset += 192)
{
{
uint pglobal = poffset + threadIdx.x;
if (pglobal < elementsvolume)
{
s_volumeftRe[threadIdx.x] = d_volumeft[pglobal].x;
s_volumeftIm[threadIdx.x] = d_volumeft[pglobal].y;
s_volumeweights[threadIdx.x] = d_volumepsf[pglobal];
}
}
__syncthreads();
uint plimit = min(192, elementsvolume - poffset);
for (uint p = 0; p < plimit; p++)
{
uint pglobal = poffset + p;
uint px = pglobal % dimft;
uint py = (pglobal / dimft) % dim;
uint pz = pglobal / (dimft * dim);
glm::vec3 pospixel = glm::vec3(px, py, pz) - center;
if (pospixel.x * pospixel.x + pospixel.y * pospixel.y + pospixel.z * pospixel.z < dim * dim * 4)
{
float s = sinc(pospixel.x - posglobal.x) * sinc(pospixel.y - posglobal.y) * sinc(pospixel.z - posglobal.z);
tcomplex val = make_cuComplex(s_volumeftRe[p], s_volumeftIm[p]);
tfloat valweight = s_volumeweights[p];
sumft += val * s;
sumweight += valweight * s;
if (px == dim / 2)
continue;
s = sinc(-pospixel.x - posglobal.x) * sinc(-pospixel.y - posglobal.y) * sinc(-pospixel.z - posglobal.z);
val.y = -val.y;
sumft += val * s;
sumweight += valweight * s;
}
}
__syncthreads();
}
if (id >= dimft * dim)
return;
float falloff = pos.x * pos.x + pos.y * pos.y >= (float)(dim * dim / 4) ? 0.0f : 1.0f;
sumft *= falloff;
sumweight *= falloff;
tfloat2 delta = d_shifts[blockIdx.y];
tfloat factor = (delta.x * pos.x + delta.y * pos.y) * PI2;
d_imageft[id] = cmul(sumft, make_cuComplex(cos(factor), sin(factor)));
d_imagepsf[id] = sumweight;
}
__global__ void IntersectionKernel(float* d_distmin, float* d_distmax, int2 dims, tfloat3 boxmin, tfloat3 boxmax, glm::vec3 invdirection, char3 signs, glm::mat4 transform)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dims.x)
return;
int idy = blockIdx.y;
d_distmin += idy * dims.x + idx;
d_distmax += idy * dims.x + idx;
glm::vec4 origin = transform * glm::vec4((float)idx, (float)idy, 9999.0f, 1.0f);
float tmin, tmax, tymin, tymax, tzmin, tzmax;
tmin = ((signs.x ? boxmax.x : boxmin.x) - origin.x) * invdirection.x;
tmax = ((signs.x ? boxmin.x : boxmax.x) - origin.x) * invdirection.x;
tymin = ((signs.y ? boxmax.y : boxmin.y) - origin.y) * invdirection.y;
tymax = ((signs.y ? boxmin.y : boxmax.y) - origin.y) * invdirection.y;
if ((tmin > tymax) || (tymin > tmax))
{
*d_distmin = 0.0f;
*d_distmax = 0.0f;
return;
}
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = ((signs.z ? boxmax.z : boxmin.z) - origin.z) * invdirection.z;
tzmax = ((signs.z ? boxmin.z : boxmax.z) - origin.z) * invdirection.z;
if ((tmin > tzmax) || (tzmin > tmax))
{
*d_distmin = 0.0f;
*d_distmax = 0.0f;
return;
}
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
if (!isnan(tmin) && !isnan(tmax))
{
*d_distmin = tmin;
*d_distmax = tmax;
}
else
{
*d_distmin = 0.0f;
*d_distmax = 0.0f;
}
}
template <bool cubicinterp> __global__ void RaytraceVolumeKernel(cudaTex t_volume, int3 dimsvolume, tfloat* d_projection, int2 dimsimage, float* d_distmin, float* d_distmax, glm::vec3 direction, glm::mat4 transform)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dimsimage.x)
return;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dimsimage.y)
return;
float distmin = d_distmin[idy * dimsimage.x + idx];
float distmax = d_distmax[idy * dimsimage.x + idx];
d_projection += idy * dimsimage.x + idx;
float pathlength = distmax - distmin;
ushort steps = ceil(pathlength * 5.0f);
double sum = 0.0;
if (steps > 0)
{
float steplength = pathlength / (float)steps;
glm::vec4 origin4 = transform * glm::vec4((float)idx, (float)idy, 9999.0f, 1.0f);
glm::vec3 origin = glm::vec3(origin4.x, origin4.y, origin4.z);
distmin += steplength / 2.0f;
for (ushort i = 0; i < steps; i++)
{
glm::vec3 point = (distmin + (float)i * steplength) * direction + origin;
if (cubicinterp)
sum += cubicTex3D(t_volume, point.x, point.y, point.z) * steplength;
else
sum += tex3D<tfloat>(t_volume, point.x, point.y, point.z) * steplength;
}
}
*d_projection = sum;
}
}
|
d358ec4e3178b6738ac96631e7f9ef3a325de221.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <cstdio>
#include <string>
#include "neural_net.h"
template <typename T>
__global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size,
int output_size, float eps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size) return;
int cur_class = static_cast<int>(y[i]);
dSO[i * output_size + cur_class] =
-1 / (SO[i * output_size + cur_class] * batch_size + eps);
}
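// Note: softmaxLossBackProp writes only the gradient entry of the true class,
// -1 / (batch_size * p_true + eps); it assumes dSO has been zero-initialized
// beforehand (the zero-fill is expected to happen elsewhere in the training loop).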
template <typename T>
__global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size,
int num_classes, float eps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size) return;
loss[i] = -logf(O[i * num_classes + y[i]] + eps);
}
template <typename T>
__global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size) return;
T max = O[i * num_classes];
int index = 0;
for (int j = 1; j < num_classes; j++) {
if (O[i * num_classes + j] > max) {
max = O[i * num_classes + j];
index = j;
}
}
pred_y[i] = index;
}
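// computeLoss launches the per-sample cross-entropy kernel above (-log of the
// predicted probability of the true class), copies the per-sample losses back to the
// host and returns their mean over the batch.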
float NeuralNet::computeLoss() {
if (layer_type[num_layers - 1] == SOFTMAX) {
if (data_type == CUDNN_DATA_FLOAT)
hipLaunchKernelGGL(( computeSoftmaxLoss<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0,
(float *)layer_input[num_layers], this->y, loss, batch_size,
num_classes, softmax_eps);
else if (data_type == CUDNN_DATA_DOUBLE)
hipLaunchKernelGGL(( computeSoftmaxLoss<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0,
(double *)layer_input[num_layers], this->y, loss, batch_size,
num_classes, softmax_eps);
}
checkCudaErrors(hipMemcpy(h_loss, loss, batch_size * sizeof(float),
hipMemcpyDeviceToHost));
float total_loss = 0.0;
for (int i = 0; i < batch_size; i++) total_loss += h_loss[i];
return total_loss / batch_size;
}
void NeuralNet::compareOutputCorrect(int *correct_count, int *y) {
*correct_count = 0;
if (data_type == CUDNN_DATA_FLOAT) {
float *typecast_O = (float *)layer_input[num_layers - 1];
hipLaunchKernelGGL(( inferClass<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0,
typecast_O, pred_y, batch_size, num_classes);
checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int),
hipMemcpyDeviceToHost));
for (int i = 0; i < batch_size; i++) {
if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1;
}
} else if (data_type == CUDNN_DATA_DOUBLE) {
double *typecast_O = (double *)layer_input[num_layers - 1];
hipLaunchKernelGGL(( inferClass<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0,
typecast_O, pred_y, batch_size, num_classes);
checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int),
hipMemcpyDeviceToHost));
for (int i = 0; i < batch_size; i++) {
if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1;
}
}
}
NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type,
int batch_size, TensorFormat tensor_format,
long long dropout_seed, float softmax_eps,
float init_std_dev, vDNNType vdnn_type,
vDNNConvAlgo vdnn_conv_algo, UpdateRule update_rule) {
// ---------------------- vDNN start ----------------------
checkCudaErrors(hipStreamCreate(&stream_compute));
checkCudaErrors(hipStreamCreate(&stream_memory));
this->vdnn_type = vdnn_type;
this->vdnn_conv_algo = vdnn_conv_algo;
// ---------------------- vDNN end ------------------------
// create handle
checkCUDNN(cudnnCreate(&cudnn_handle));
checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute));
checkCUBLAS(hipblasCreate(&cublas_handle));
checkCUBLAS(hipblasSetStream(cublas_handle, stream_compute));
checkCURAND(hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT));
checkCURAND(hiprandSetStream(curand_gen, stream_compute));
checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
init_free_bytes = free_bytes;
std::cout << "Free bytes at start: " << free_bytes << std::endl;
pre_alloc_conv_derivative = false;
pre_alloc_fc_derivative = false;
pre_alloc_batch_norm_derivative = true;
if (vdnn_type == vDNN_NONE) {
pre_alloc_conv_derivative = true;
pre_alloc_fc_derivative = true;
pre_alloc_batch_norm_derivative = true;
}
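// With vDNN_NONE nothing is offloaded, so all parameter gradients can be allocated up
// front; otherwise conv/FC gradients default to lazy allocation during the backward
// pass (see the !pre_alloc_* branches in the memory simulation below).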
if (data_type == DATA_FLOAT) {
this->data_type = CUDNN_DATA_FLOAT;
data_type_size = sizeof(float);
}
else if (data_type == DATA_DOUBLE) {
this->data_type = CUDNN_DATA_DOUBLE;
data_type_size = sizeof(double);
}
if (tensor_format == TENSOR_NCHW)
this->tensor_format = CUDNN_TENSOR_NCHW;
else if (tensor_format == TENSOR_NHWC)
this->tensor_format = CUDNN_TENSOR_NHWC;
this->batch_size = batch_size;
this->softmax_eps = softmax_eps;
this->init_std_dev = init_std_dev;
num_layers = layers.size();
// allocation of space for input to each layer
layer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int));
dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
params = (void **)malloc(num_layers * sizeof(void *));
LayerDimension prev_output_size;
LayerDimension current_output_size;
for (int i = 0; i < num_layers; i++) {
layer_type.push_back(layers[i].type);
if (layers[i].type == CONV) {
ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
params[i] = malloc(sizeof(ConvLayerParams));
((ConvLayerParams *)params[i])
->initializeValues(cudnn_handle, user_params, this->data_type,
batch_size, this->tensor_format, data_type_size,
current_output_size, update_rule);
} else if (layers[i].type == FULLY_CONNECTED) {
FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
params[i] = malloc(sizeof(FCLayerParams));
((FCLayerParams *)params[i])
->initializeValues(user_params, batch_size, this->tensor_format,
this->data_type, current_output_size, update_rule);
} else if (layers[i].type == DROPOUT) {
DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
params[i] = malloc(sizeof(DropoutLayerParams));
((DropoutLayerParams *)params[i])
->initializeValues(cudnn_handle, user_params, this->data_type,
batch_size, this->tensor_format,
current_output_size);
}
else if (layers[i].type == BATCHNORM) {
BatchNormDescriptor *user_params =
(BatchNormDescriptor *)layers[i].params;
params[i] = malloc(sizeof(BatchNormLayerParams));
((BatchNormLayerParams *)params[i])
->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size, update_rule);
} else if (layers[i].type == POOLING) {
PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
params[i] = malloc(sizeof(PoolingLayerParams));
((PoolingLayerParams *)params[i])
->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
}
else if (layers[i].type == ACTV) {
ActivationDescriptor *user_params =
(ActivationDescriptor *)layers[i].params;
params[i] = malloc(sizeof(ActivationLayerParams));
((ActivationLayerParams *)params[i])
->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
}
else if (layers[i].type == SOFTMAX) {
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
params[i] = malloc(sizeof(SoftmaxLayerParams));
((SoftmaxLayerParams *)params[i])
->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
// std::cout << current_output_size.N << ' ' << current_output_size.C <<
// current_output_size.H << current_output_size.W << std::endl;
}
if (i == 0) {
prev_output_size = current_output_size;
}
// incomplete - have to check flatten and check exact dimension
// else if (current_output_size.getTotalSize() !=
// prev_output_size.getTotalSize()) {
// std::cout << "Layer " << i << " output and next layer's input size
// mismatch\n";
// exit(0);
// }
}
// ---------------------- vDNN start ----------------------
// allocate space in host memory for layers to be transferred
h_layer_input = (void **)malloc(num_layers * sizeof(void *));
to_offload = (bool *)malloc(num_layers * sizeof(bool));
prefetched = (bool *)malloc(num_layers * sizeof(bool));
// ---------------------- vDNN end ------------------------
checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
std::cout << "Free bytes just before allocate space: " << free_bytes
<< std::endl;
// allocate space for parameters
// Exception BatchNorm - looks like it will take lots of space if only FC
// layers - space taken = size of one input
for (int i = 0; i < num_layers; i++) {
size_t input_size;
if (layers[i].type == CONV) {
ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
((ConvLayerParams *)params[i])
->allocateSpace(curand_gen, this->data_type, data_type_size,
init_std_dev, free_bytes, pre_alloc_conv_derivative);
input_size = batch_size * user_params->input_channels *
user_params->input_h * user_params->input_w;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = user_params->input_h;
input_w = user_params->input_w;
}
} else if (layers[i].type == FULLY_CONNECTED) {
FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
((FCLayerParams *)params[i])
->allocateSpace(curand_gen, this->data_type, data_type_size,
init_std_dev, free_bytes, pre_alloc_fc_derivative);
input_size = batch_size * user_params->input_channels;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = 1;
input_w = 1;
}
} else if (layers[i].type == DROPOUT) {
DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
((DropoutLayerParams *)params[i])
->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed);
input_size =
batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
} else if (layers[i].type == BATCHNORM) {
BatchNormDescriptor *user_params =
(BatchNormDescriptor *)layers[i].params;
((BatchNormLayerParams *)params[i])
->allocateSpace(this->data_type, data_type_size, free_bytes,
pre_alloc_batch_norm_derivative);
input_size =
batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
} else if (layers[i].type == POOLING) {
PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
((PoolingLayerParams *)params[i])->allocateSpace(free_bytes);
input_size = batch_size * user_params->input_channels *
user_params->input_h * user_params->input_w;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = user_params->input_h;
input_w = user_params->input_w;
}
} else if (layers[i].type == ACTV) {
ActivationDescriptor *user_params =
(ActivationDescriptor *)layers[i].params;
((ActivationLayerParams *)params[i])->allocateSpace(free_bytes);
input_size =
batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
} else if (layers[i].type == SOFTMAX) {
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes);
input_size =
batch_size * user_params->channels * user_params->h * user_params->w;
// assuming this is last layer, allocate for next layer as well
// checkCudaErrors(hipMalloc(&layer_input[i + 1], input_size *
// data_type_size));
// checkCudaErrors(hipMalloc(&dlayer_input[i + 1], input_size *
// data_type_size));
layer_input_size[i + 1] = input_size;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
if (i == num_layers - 1) {
num_classes = user_params->channels;
}
}
// do not allocate memory initially
// checkCudaErrors(hipMalloc(&layer_input[i], input_size *
// data_type_size));
// checkCudaErrors(hipMalloc(&dlayer_input[i], input_size *
// data_type_size));
// ---------------------- vDNN start ----------------------
layer_input_size[i] = input_size;
// ---------------------- vDNN end ------------------------
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
std::cout << "Free bytes just after allocate space: " << free_bytes
<< std::endl;
// very small - could be allocated initially itself
checkCudaErrors(hipMalloc((void **)&y, batch_size * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&pred_y, batch_size * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&loss, batch_size * sizeof(float)));
checkCudaErrors(hipMalloc(&one_vec, batch_size * data_type_size));
if (this->data_type == CUDNN_DATA_FLOAT)
hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)one_vec,
batch_size, 1);
else
hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)one_vec,
batch_size, 1);
checkCudaErrors(hipHostMalloc((void **)&h_loss, batch_size * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&h_pred_y, batch_size * sizeof(int)));
// do not allocate workspace initially
// allocate space for workspace and also keep track of algo
// size_t cur_workspace_size;
// workspace_size = 0;
// for (int i = 0; i < num_layers; i++) {
// if (layers[i].type == CONV) {
// ((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size,
// free_bytes);
// if (cur_workspace_size > workspace_size)
// workspace_size = cur_workspace_size;
// }
// }
// checkCudaErrors(hipMalloc(&workspace, workspace_size));
// free_bytes = free_bytes - workspace_size;
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes));
// leave 600 MB and use the rest
std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN start ----------------------
size_t exp_max_consume, max_consume;
vDNNOptimize(exp_max_consume, max_consume);
std::cout << "actual_max_consume: " << max_consume << std::endl;
std::cout << "exp_max_consume: " << exp_max_consume << std::endl;
std::cout << "diff_max_consume(MB): "
<< (max_consume - exp_max_consume) / (1.0 * 1024 * 1024)
<< std::endl;
std::cout << "exp_free_bytes(MB): "
<< (free_bytes - exp_max_consume) / (1.0 * 1024 * 1024)
<< std::endl;
std::cout << "exp_total_consume(MB): "
<< (init_free_bytes - (free_bytes - exp_max_consume)) /
(1.0 * 1024 * 1024)
<< std::endl;
std::cout << "actual_total_consume(MB): "
<< (init_free_bytes - (free_bytes - max_consume)) /
(1.0 * 1024 * 1024)
<< std::endl;
// ---------------------- vDNN end ------------------------
// remove later
exit(0);
// ---------------------- vDNN start ----------------------
free_bytes = max_consume;
cnmemDevice_t cnmem_device;
size_t cnmem_stream_memory_size = free_bytes;
cnmem_device.device = 0;
cnmem_device.size = cnmem_stream_memory_size;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
// do not allow call to hipMalloc
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
// ---------------------- vDNN end ------------------------
// ---------------------- vDNN start ----------------------
for (int i = 0; i < num_layers; i++) {
std::cerr << "to_offload[i] " << to_offload[i] << std::endl;
}
for (int i = 0; i < num_layers; i++) {
// allocate pinned memory in host
if (to_offload[i])
checkCudaErrors(hipHostMalloc(&h_layer_input[i],
layer_input_size[i] * data_type_size));
}
// ---------------------- vDNN end ------------------------
checkCudaErrors(hipDeviceSynchronize());
size_t temp_free_bytes;
checkCudaErrors(hipMemGetInfo(&temp_free_bytes, &total_bytes));
std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes
<< std::endl;
// {
// int n;
// std::cout << "waiting..\n";
// std::cin >> n;
// }
// data of time
checkCudaErrors(hipEventCreate(&start_compute));
checkCudaErrors(hipEventCreate(&stop_compute));
checkCudaErrors(hipEventCreate(&start_transfer));
checkCudaErrors(hipEventCreate(&stop_transfer));
}
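// simulateNeuralNetworkMemory dry-runs one forward and one backward pass with a
// CnmemSpace tracker: it adds/removes each buffer (layer outputs, conv workspaces,
// lazily allocated gradients, prefetched offloaded inputs) and records the peak
// footprint in max_consume, returning false if the pass would not fit in free_bytes.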
bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref,
bool hard, size_t &exp_max_consume,
size_t &max_consume) {
CnmemSpace space_tracker(free_bytes);
max_consume = 0;
// forward pass
// allocate space for 1st input
std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed()
<< std::endl;
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[0] * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating input(MB): "
<< space_tracker.getConsumed() << std::endl;
std::cerr << "Forward pass" << std::endl;
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == SOFTMAX) break;
std::cerr << "Processing layer " << i << std::endl;
std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed()
<< std::endl;
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[i + 1] * data_type_size);
std::cerr << "Used space after output allocation(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
size_t cur_workspace_size;
checkWORKSPACE(cur_params->getWorkspaceSize(
space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard,
cur_workspace_size));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
space_tracker.updateMaxConsume(max_consume);
if (!space_tracker.isAvailable()) return false;
std::cerr << "Used space after workspace allocation(MB): "
<< space_tracker.getConsumed() << std::endl;
// current layer computation over, deallocate workspace
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
std::cerr << "Used space after workspace deallocation(MB): "
<< space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable()) return false;
// deallocate layer input
if (to_offload[i]) {
std::cerr << "deallocating input to " << i << std::endl;
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
std::cerr << "Used space after deallocating input(MB): "
<< space_tracker.getConsumed() << std::endl;
}
}
std::cerr << "Backward pass" << std::endl;
if (batch_size * num_classes * data_type_size !=
layer_input_size[num_layers] * data_type_size) {
std::cout << "Panic!! Using wrong size\n";
exit(0);
}
// backward pass
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[num_layers] * data_type_size);
std::cerr << "Used space after allocating final derivative(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
// std::cerr << "max_consume: " << max_consume << std::endl;
for (int i = num_layers - 1; i >= 0; i--) {
// allocate space for previous layer derivative
std::cerr << "Processing layer " << i << std::endl;
std::cerr << "Used space initial(MB): " << space_tracker.getConsumed()
<< std::endl;
if (i > 0) {
if (layer_type[i] == SOFTMAX)
continue;
else {
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[i] * data_type_size);
std::cerr << "Used space after allocating prev. derivative(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
}
// std::cerr << "max_consume: " << max_consume << std::endl;
}
int layer_to_prefetch = findPrefetchLayer(i);
// if layer to be prefetched, allocate space for that layer
if (layer_to_prefetch != -1) {
std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl;
space_tracker.updateSpace(
CnmemSpace::SUB,
layer_input_size[layer_to_prefetch] * data_type_size);
std::cerr << "Used space after allocating prefetch(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
}
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
size_t cur_filter_workspace_size;
checkWORKSPACE(cur_params->getWorkspaceSize(
space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref,
hard, cur_filter_workspace_size));
size_t cur_data_workspace_size = 0;
if (i > 0)
checkWORKSPACE(cur_params->getWorkspaceSize(
space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref,
hard, cur_data_workspace_size));
size_t cur_workspace_size =
(cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
std::cerr << "Used space after allocating workspace(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
if (!pre_alloc_conv_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->C_out * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
// std::cerr << "max_consume: " << max_consume << std::endl;
if (!space_tracker.isAvailable()) return false;
// current layer computation over, deallocate workspace
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
std::cerr << "Used space after deallocating workspace(MB): "
<< space_tracker.getConsumed() << std::endl;
if (!pre_alloc_conv_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->C_out * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
} else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
space_tracker.updateSpace(
CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->C_out * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable()) return false;
if (!pre_alloc_fc_derivative) {
space_tracker.updateSpace(
CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->C_out * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
} else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->allocation_size * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable()) return false;
if (!pre_alloc_batch_norm_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->allocation_size * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
}
if (!space_tracker.isAvailable()) return false;
// deallocate layer output and derivative
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i + 1] * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i + 1] * data_type_size);
std::cerr << "Used space after deallocating output, derivative(MB): "
<< space_tracker.getConsumed() << std::endl;
// if 1st layer, deallocate input layer also
if (i == 0) {
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
std::cerr << "Used space after deallocating input(MB): "
<< space_tracker.getConsumed() << std::endl;
}
}
if (space_tracker.getConsumed() > 0) std::cerr << "Panic!! more free bytes\n";
if (space_tracker.getConsumed() != 0)
std::cerr << "Panic!! bytes not freed properly\n";
// return true;
exp_max_consume = max_consume;
// check with cnmem once
bool ret_val = simulateCNMEMMemory(max_consume);
return ret_val;
}
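// simulateCNMEMMemory cross-checks the estimate against cnmem by replaying the same
// allocation sequence with a pool of size max_consume; the checkCNMEMSim macro
// (defined elsewhere) appears to grow max_consume and retry when an allocation fails,
// and the memory state after each step is dumped to a .dat file for inspection.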
bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) {
size_t init_max_consume = max_consume;
cnmemDevice_t cnmem_device;
size_t t;
checkCudaErrors(hipMemGetInfo(&free_bytes, &t));
std::cout << "free_bytes: " << free_bytes << std::endl;
free_bytes -= 100 * 1024 * 1024;
cnmem_device.device = 0;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
std::string cnmem_memory_state_filename;
if (vdnn_type == vDNN_ALL) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_p.dat";
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_m.dat";
}
} else if (vdnn_type == vDNN_CONV) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_p.dat";
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_m.dat";
}
} else if (vdnn_type == vDNN_DYN) {
cnmem_memory_state_filename = "cnmem_dyn.dat";
} else {
cnmem_memory_state_filename = "cnmem_unknown.dat";
}
FILE *cnmem_memory_state_fptr =
fopen(cnmem_memory_state_filename.c_str(), "w");
size_t run_count = 0;
bool out_of_memory = false;
while (true) {
run_count++;
if (max_consume >= free_bytes) break;
out_of_memory = false;
cnmem_device.size = max_consume;
std::cerr << run_count << ' ' << max_consume << std::endl;
if (max_consume > free_bytes)
std::cerr << "panic!! max_consume > free_bytes\n";
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
resetPrefetched();
fprintf(
cnmem_memory_state_fptr,
"//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "run_count: %lu\n", run_count);
fprintf(cnmem_memory_state_fptr, "max_consume: %lu\n", max_consume);
fprintf(
cnmem_memory_state_fptr,
"//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "initial state\n");
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
checkCNMEMSim(cnmemMalloc(&layer_input[0],
layer_input_size[0] * data_type_size, NULL),
layer_input_size[0] * data_type_size, max_consume, free_bytes,
checkCNMEM(cnmemFinalize());
continue, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. layer_input[%d] - size: %lu\n", 0,
layer_input_size[0] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
// forward propagate
for (int i = 0; i < num_layers; i++) {
size_t cur_workspace_size;
void *cur_workspace;
checkCNMEMSim(cnmemMalloc(&layer_input[i + 1],
layer_input_size[i + 1] * data_type_size, NULL),
layer_input_size[i + 1] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. layer_input[%d] - size: %lu\n", i + 1,
layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break,
out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. conv. workspace - size: %lu\n",
cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL), cur_workspace_size,
max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (to_offload[i]) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free layer_input[%d] - size: %lu\n", i,
layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
}
if (out_of_memory) {
checkCNMEM(cnmemFinalize());
if (max_consume < free_bytes)
continue;
else
break;
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[num_layers],
batch_size * num_classes * data_type_size, NULL),
layer_input_size[num_layers] * data_type_size, max_consume,
free_bytes, checkCNMEM(cnmemFinalize());
continue, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. dlayer_input[%d] - size: %lu\n", num_layers,
layer_input_size[num_layers] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size,
cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
} else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEMSim(
cnmemMalloc(
&layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size, NULL),
layer_input_size[layer_to_prefetch] * data_type_size,
max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. prefetch layer_input[%d] - size: %lu\n",
layer_to_prefetch,
layer_input_size[layer_to_prefetch] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[i],
layer_input_size[i] * data_type_size, NULL),
layer_input_size[i] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. dlayer_input[%d] - size: %lu\n", i,
layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(
data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n",
cur_params->kernel_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n",
cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
cur_workspace_size =
(cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break,
out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. conv. workspace - size: %lu\n",
cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(
data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n",
cur_params->weight_matrix_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n",
cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(
data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dscale - size: %lu\n",
cur_params->allocation_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. dbias - size: %lu\n",
cur_params->allocation_size * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
continue;
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL), cur_workspace_size,
max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n",
(long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
} else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n",
(long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
} else if (layer_type[i] == BATCHNORM) {
if (!pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n",
(long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEMSim(cnmemFree(layer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free layer_input[%d] - size: %lu\n", i + 1,
layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
checkCNMEMSim(cnmemFree(dlayer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free dlayer_input[%d] - size: %lu\n", i + 1,
layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (i == 0) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free layer_input[%d] - size: %lu\n", i,
layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEM(cnmemFinalize());
if (out_of_memory) {
if (max_consume < free_bytes)
continue;
else
break;
}
break;
}
  free_bytes += 100 * 1024 * 1024;  // put back the 100 MB headroom reserved at the start of the simulation
if (max_consume < free_bytes) {
double exp_size =
(init_max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
double act_size =
(max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
fprintf(cnmem_memory_state_fptr, "expected_memory_consume: %f MB\n",
exp_size);
fprintf(cnmem_memory_state_fptr, "actual_memory_consume: %f MB\n",
act_size);
} else {
fprintf(cnmem_memory_state_fptr, "out of memory\n");
}
fclose(cnmem_memory_state_fptr);
  return max_consume < free_bytes;
}
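// Selects the offload set and convolution algorithms for the configured vDNN
// policy. For vDNN_ALL / vDNN_CONV / vDNN_NONE the offload set is fixed and a
// single feasibility simulation is run; for vDNN_DYN, after an initial
// trainability check (offload all + memory-optimal algorithms), configurations
// are tried from "no offload + performance-optimal algorithms" down to
// "offload all + memory-optimal algorithms", and the first one that fits in
// GPU memory is kept.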
void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) {
bool hard = true, soft = false;
// if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available
if (vdnn_type == vDNN_ALL) {
setOffload(OFFLOAD_ALL);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
}
return;
} else if (vdnn_type == vDNN_CONV) {
setOffload(OFFLOAD_CONV);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
}
return;
} else if (vdnn_type == vDNN_NONE) {
setOffload(OFFLOAD_NONE);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
}
return;
}
if (vdnn_type == vDNN_DYN) {
// check for trainability
std::cerr << "vDNN_DYN\n";
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
// check if work with fastest algo and no offload, if so, select it and
// return
setOffload(NeuralNet::OFFLOAD_NONE);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n";
return;
}
// check if conv offload and fastest algo works, then check if all offload
// and fastest algo works
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n";
return;
}
// optimize using greedy algo memory usage while improving performance
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft,
exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft,
exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, ALL OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n";
return;
}
}
  // reaching this point means no workable offload/algorithm configuration was found
  exit(0);
}
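// Marks which layers have their input offloaded to host memory during the
// forward pass. ACTV and SOFTMAX layers are never offloaded, and the last
// non-ACTV/SOFTMAX layer is also kept on the GPU (its input is needed
// immediately by the backward pass).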
void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) {
if (offload_type == OFFLOAD_NONE) {
for (int i = 0; i < num_layers; i++) to_offload[i] = false;
} else if (offload_type == OFFLOAD_CONV) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == CONV)
to_offload[i] = true;
else
to_offload[i] = false;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
} else if (offload_type == OFFLOAD_ALL) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX)
to_offload[i] = false;
else
to_offload[i] = true;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
}
}
void NeuralNet::resetPrefetched() {
for (int i = 0; i < num_layers; i++) prefetched[i] = false;
}
void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train,
int *correct_count, float *loss) {
std::vector<float> t1, t2;
this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss);
}
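// Runs one forward/backward pass over a mini-batch. In the forward pass the
// inputs of offloaded layers are copied to pinned host memory on stream_memory
// while computation continues on stream_compute; in the backward pass they are
// prefetched back shortly before they are needed. fwd_vdnn_lag / bwd_vdnn_lag
// record, per layer, how much longer the memory stream runs after the compute
// stream for that layer has finished.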
void NeuralNet::getLoss(void *X, int *y, double learning_rate,
std::vector<float> &fwd_vdnn_lag,
std::vector<float> &bwd_vdnn_lag, bool train,
int *correct_count, float *scalar_loss) {
CnmemSpace space_tracker(free_bytes);
// std::cout << "here\n";
// std::cout << "Free bytes: " << free_bytes << std::endl;
for (int i = 0; i < num_layers; i++) prefetched[i] = false;
checkCNMEM(
cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[0] * data_type_size);
checkCudaErrors(hipMemcpy(
layer_input[0], X,
batch_size * input_channels * input_h * input_w * data_type_size,
hipMemcpyHostToDevice));
  if (train == true) {
    // labels are int-typed; copy batch_size * sizeof(int) bytes rather than
    // data_type_size (which tracks the float/double activation type)
    checkCudaErrors(hipMemcpy(this->y, y, batch_size * sizeof(int),
                              hipMemcpyHostToDevice));
  }
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
// forward propagate
for (int i = 0; i < num_layers; i++) {
if (train == false && i == num_layers - 1) break;
// ---------------------- vDNN start ----------------------
size_t cur_workspace_size;
void *cur_workspace;
// offload if required
if (i > 0 && to_offload[i] && train == true)
checkCudaErrors(hipMemcpyAsync(h_layer_input[i], layer_input[i],
layer_input_size[i] * data_type_size,
hipMemcpyDeviceToHost, stream_memory));
checkCNMEM(cnmemMalloc(&layer_input[i + 1],
layer_input_size[i + 1] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[i + 1] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
// std::cout << "here" << i << std::endl;
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
// computation
checkCUDNN(cudnnConvolutionForward(
cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i],
cur_params->filter_desc, cur_params->W, cur_params->conv_desc,
cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->output_tensor, layer_input[i + 1]));
checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc,
cur_params->b, &alpha,
cur_params->output_tensor, layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, layer_input[i + 1]));
}
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
else if (layer_type[i] == FULLY_CONNECTED) {
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)params[i];
// std::cout << "FChere" << i << std::endl;
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(hipblasSgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W,
cur_params->C_out, (float *)layer_input[i], cur_params->C_in,
&Sbeta, (float *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasSgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out,
(float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1],
cur_params->C_out));
} else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W,
cur_params->C_out, (double *)layer_input[i], cur_params->C_in,
&Dbeta, (double *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasDgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out,
(double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1],
cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, layer_input[i + 1]));
}
// std::cout << "FChere" << i << std::endl;
} else if (layer_type[i] == DROPOUT) {
// std::cout << "Dropout\n";
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutForward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
layer_input[i], cur_params->input_tensor, layer_input[i + 1],
cur_params->reserved_space, cur_params->reserved_space_size));
} else if (layer_type[i] == BATCHNORM) {
// std::cout << "Batchnorm\n";
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (train == true) {
checkCUDNN(cudnnBatchNormalizationForwardTraining(
cudnn_handle, cur_params->mode, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale,
cur_params->bias, cur_params->factor, cur_params->running_mean,
cur_params->running_variance, cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
} else {
checkCUDNN(cudnnBatchNormalizationForwardInference(
cudnn_handle, cur_params->mode, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale,
cur_params->bias, cur_params->running_mean,
cur_params->running_variance, cur_params->epsilon));
}
} else if (layer_type[i] == POOLING) {
// std::cout << "Pooling\n";
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(
cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->output_tensor, layer_input[i + 1]));
} else if (layer_type[i] == ACTV) {
// std::cout << "Actv\n";
std::cout << "Panic!! ACTV wrong place\n";
exit(0);
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor,
layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1]));
} else if (layer_type[i] == SOFTMAX) {
// std::cout << "Softmax\n";
std::cout << "Panic!! SOFTMAX wrong place\n";
exit(0);
if (train == true) {
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, layer_input[i + 1]));
}
}
// ---------------------- vDNN start ----------------------
// synchronization
// checkCudaErrors(hipDeviceSynchronize());
// if next layer is ACTV or SOFTMAX, complete that and come to
// synchronization
// the case in above if for ACTV and SOFTMAX never occurs
if (layer_type[i + 1] == SOFTMAX) {
i++;
if (train == true) {
layer_input[i + 1] = layer_input[i];
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, layer_input[i + 1]));
}
i--;
}
struct timespec start_time, end_time;
checkCudaErrors(hipStreamSynchronize(stream_compute));
if (train) clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(hipStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 +
(end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
fwd_vdnn_lag.push_back(lag);
}
// std::cout << "EndSynchere" << i << std::endl;
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
}
if (to_offload[i] && train == true) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
}
if (train == false) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
// std::cout << "EndSynchere" << i << std::endl;
// ---------------------- vDNN end ------------------------
}
// std::cout << "here" << std::endl;
if (train == false) {
compareOutputCorrect(correct_count, y);
checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL));
space_tracker.updateSpace(
CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size);
return;
}
*scalar_loss = computeLoss();
// ---------------------- vDNN start ----------------------
checkCNMEM(cnmemMalloc(&dlayer_input[num_layers],
batch_size * num_classes * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[num_layers] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
if (layer_type[num_layers - 1] == SOFTMAX) {
// SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers
// - 1];
if (data_type == CUDNN_DATA_FLOAT) {
checkCudaErrors(hipMemset(dlayer_input[num_layers], 0,
batch_size * num_classes * sizeof(float)));
hipLaunchKernelGGL(( softmaxLossBackProp<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0,
this->y, (float *)layer_input[num_layers],
(float *)dlayer_input[num_layers], batch_size, num_classes,
softmax_eps);
} else if (data_type == CUDNN_DATA_DOUBLE) {
checkCudaErrors(hipMemset(dlayer_input[num_layers], 0,
batch_size * num_classes * sizeof(double)));
hipLaunchKernelGGL(( softmaxLossBackProp<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0,
this->y, (double *)layer_input[num_layers],
(double *)dlayer_input[num_layers], batch_size, num_classes,
softmax_eps);
}
}
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size,
cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
} else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEM(cnmemMalloc(
&layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size, NULL));
space_tracker.updateSpace(
CnmemSpace::SUB,
layer_input_size[layer_to_prefetch] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
if (layer_to_prefetch != 0) {
checkCudaErrors(hipMemcpyAsync(
layer_input[layer_to_prefetch],
h_layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size,
hipMemcpyHostToDevice, stream_memory));
} else {
// std::cout << "transfer here\n";
checkCudaErrors(hipMemcpyAsync(
layer_input[layer_to_prefetch], X,
layer_input_size[layer_to_prefetch] * data_type_size,
hipMemcpyHostToDevice, stream_memory));
// std::cout << "transfer here\n";
}
}
checkCNMEM(cnmemMalloc(&dlayer_input[i],
layer_input_size[i] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[i] * data_type_size);
}
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
// ---------------------- vDNN end ------------------------
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->C_out * data_type_size);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size <<
// std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
checkCUDNN(cudnnConvolutionBackwardBias(
cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1],
&beta, cur_params->bias_desc, cur_params->db));
// std::cout << "neural_net: backward conv i:" << i << std::endl;
checkCUDNN(cudnnConvolutionBackwardFilter(
cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i],
cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc,
cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->filter_desc, cur_params->dW));
if (i > 0)
checkCUDNN(cudnnConvolutionBackwardData(
cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W,
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace,
cur_workspace_size, &beta, cur_params->input_tensor,
dlayer_input[i]));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// std::cout << "here\n";
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
if (!pre_alloc_fc_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(
CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->C_out * data_type_size);
}
if (data_type == CUDNN_DATA_FLOAT) {
// bias backward
checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, 1, batch_size, &Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)one_vec, batch_size, &Sbeta,
(float *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(hipblasSgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, cur_params->C_out,
cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1],
cur_params->C_out, (float *)layer_input[i], cur_params->C_in,
&Sbeta, (float *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(
hipblasSgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Salpha, (float *)cur_params->W, cur_params->C_out,
(float *)dlayer_input[i + 1], cur_params->C_out,
&Sbeta, (float *)dlayer_input[i], cur_params->C_in));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
// bias backward
checkCUBLAS(hipblasDgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, 1,
batch_size, &Dalpha, (double *)dlayer_input[i + 1],
cur_params->C_out, (double *)one_vec, batch_size, &Dbeta,
(double *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Dalpha, (double *)dlayer_input[i + 1],
cur_params->C_out, (double *)layer_input[i],
cur_params->C_in, &Dbeta,
(double *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(
hipblasDgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Dalpha, (double *)cur_params->W, cur_params->C_out,
(double *)dlayer_input[i + 1], cur_params->C_out,
&Dbeta, (double *)dlayer_input[i], cur_params->C_in));
}
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutBackward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i],
cur_params->reserved_space, cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->allocation_size * data_type_size);
}
checkCUDNN(cudnnBatchNormalizationBackward(
cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i],
cur_params->sbmv_desc, cur_params->scale, cur_params->dscale,
cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean,
cur_params->result_save_inv_var));
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(
cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, dlayer_input[i]));
}
else if (layer_type[i] == ACTV) {
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor,
layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, dlayer_input[i]));
continue;
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxBackward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1], &beta,
cur_params->input_tensor, dlayer_input[i]));
// std::cout << "compute here\n";
continue;
}
// ---------------------- vDNN start ----------------------
// checkCudaErrors(hipDeviceSynchronize());
struct timespec start_time, end_time;
checkCudaErrors(hipStreamSynchronize(stream_compute));
if (train) clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(hipStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 +
(end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag);
}
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->C_out * data_type_size);
}
} else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(
CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->C_out * data_type_size);
}
} else if (layer_type[i] == BATCHNORM) {
if (train == true and !pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->allocation_size * data_type_size);
}
}
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i + 1] * data_type_size);
checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i + 1] * data_type_size);
if (i == 0) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
}
// ---------------------- vDNN end ------------------------
}
if (space_tracker.getConsumed() != 0) {
std::cout << "Panic!! Space not updated properly\n";
}
// exit(0);
}
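// Returns the nearest earlier layer that was offloaded and not yet prefetched,
// marking it as prefetched; the scan stops at the first CONV layer it meets
// (returning -1) so prefetches are not issued too far ahead of their use.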
int NeuralNet::findPrefetchLayer(int cur_layer) {
for (int i = cur_layer - 1; i >= 0; i--) {
if (to_offload[i] && !prefetched[i]) {
prefetched[i] = true;
return i;
} else if (layer_type[i] == CONV) {
return -1;
}
}
return -1;
}
|
d358ec4e3178b6738ac96631e7f9ef3a325de221.cu
|
#include <time.h>
#include <cstdio>
#include <string>
#include "neural_net.h"
template <typename T>
__global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size,
int output_size, float eps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size) return;
int cur_class = static_cast<int>(y[i]);
dSO[i * output_size + cur_class] =
-1 / (SO[i * output_size + cur_class] * batch_size + eps);
}
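// Per-sample cross-entropy loss: loss[i] = -log(p_true + eps), where p_true is
// the softmax output of sample i at its ground-truth class.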
template <typename T>
__global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size,
int num_classes, float eps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size) return;
loss[i] = -logf(O[i * num_classes + y[i]] + eps);
}
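// Argmax over each sample's class scores; writes the predicted class index
// into pred_y[i].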
template <typename T>
__global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size) return;
T max = O[i * num_classes];
int index = 0;
for (int j = 1; j < num_classes; j++) {
if (O[i * num_classes + j] > max) {
max = O[i * num_classes + j];
index = j;
}
}
pred_y[i] = index;
}
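// Computes the scalar training loss: launches the per-sample loss kernel on
// the final softmax output, copies the losses to the host and returns their
// mean over the batch.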
float NeuralNet::computeLoss() {
if (layer_type[num_layers - 1] == SOFTMAX) {
if (data_type == CUDNN_DATA_FLOAT)
computeSoftmaxLoss<float><<<ceil(1.0 * batch_size / BW), BW>>>(
(float *)layer_input[num_layers], this->y, loss, batch_size,
num_classes, softmax_eps);
else if (data_type == CUDNN_DATA_DOUBLE)
computeSoftmaxLoss<double><<<ceil(1.0 * batch_size / BW), BW>>>(
(double *)layer_input[num_layers], this->y, loss, batch_size,
num_classes, softmax_eps);
}
checkCudaErrors(cudaMemcpy(h_loss, loss, batch_size * sizeof(float),
cudaMemcpyDeviceToHost));
float total_loss = 0.0;
for (int i = 0; i < batch_size; i++) total_loss += h_loss[i];
return total_loss / batch_size;
}
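// Computes predictions with inferClass on the last layer's output, copies them
// to the host and counts how many match the ground-truth labels y.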
void NeuralNet::compareOutputCorrect(int *correct_count, int *y) {
*correct_count = 0;
if (data_type == CUDNN_DATA_FLOAT) {
float *typecast_O = (float *)layer_input[num_layers - 1];
inferClass<float><<<ceil(1.0 * batch_size / BW), BW>>>(
typecast_O, pred_y, batch_size, num_classes);
checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int),
cudaMemcpyDeviceToHost));
for (int i = 0; i < batch_size; i++) {
if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1;
}
} else if (data_type == CUDNN_DATA_DOUBLE) {
double *typecast_O = (double *)layer_input[num_layers - 1];
inferClass<double><<<ceil(1.0 * batch_size / BW), BW>>>(
typecast_O, pred_y, batch_size, num_classes);
checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int),
cudaMemcpyDeviceToHost));
for (int i = 0; i < batch_size; i++) {
if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1;
}
}
}
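// Builds the network: creates the compute/memory streams and the
// cuDNN/cuBLAS/cuRAND handles, initializes per-layer parameter structs and
// allocates their weights (layer inputs/outputs are allocated lazily through
// cnmem), runs vDNNOptimize to pick the offload set and conv algorithms, then
// initializes cnmem with the resulting budget and pins host buffers for the
// offloaded layers.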
NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type,
int batch_size, TensorFormat tensor_format,
long long dropout_seed, float softmax_eps,
float init_std_dev, vDNNType vdnn_type,
vDNNConvAlgo vdnn_conv_algo, UpdateRule update_rule) {
// ---------------------- vDNN start ----------------------
checkCudaErrors(cudaStreamCreate(&stream_compute));
checkCudaErrors(cudaStreamCreate(&stream_memory));
this->vdnn_type = vdnn_type;
this->vdnn_conv_algo = vdnn_conv_algo;
// ---------------------- vDNN end ------------------------
// create handle
checkCUDNN(cudnnCreate(&cudnn_handle));
checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute));
checkCUBLAS(cublasCreate(&cublas_handle));
checkCUBLAS(cublasSetStream(cublas_handle, stream_compute));
checkCURAND(curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT));
checkCURAND(curandSetStream(curand_gen, stream_compute));
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
init_free_bytes = free_bytes;
std::cout << "Free bytes at start: " << free_bytes << std::endl;
pre_alloc_conv_derivative = false;
pre_alloc_fc_derivative = false;
pre_alloc_batch_norm_derivative = true;
if (vdnn_type == vDNN_NONE) {
pre_alloc_conv_derivative = true;
pre_alloc_fc_derivative = true;
pre_alloc_batch_norm_derivative = true;
}
if (data_type == DATA_FLOAT) {
this->data_type = CUDNN_DATA_FLOAT;
data_type_size = sizeof(float);
}
else if (data_type == DATA_DOUBLE) {
this->data_type = CUDNN_DATA_DOUBLE;
data_type_size = sizeof(double);
}
if (tensor_format == TENSOR_NCHW)
this->tensor_format = CUDNN_TENSOR_NCHW;
else if (tensor_format == TENSOR_NHWC)
this->tensor_format = CUDNN_TENSOR_NHWC;
this->batch_size = batch_size;
this->softmax_eps = softmax_eps;
this->init_std_dev = init_std_dev;
num_layers = layers.size();
// allocation of space for input to each layer
layer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int));
dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *));
params = (void **)malloc(num_layers * sizeof(void *));
LayerDimension prev_output_size;
LayerDimension current_output_size;
for (int i = 0; i < num_layers; i++) {
layer_type.push_back(layers[i].type);
if (layers[i].type == CONV) {
ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
params[i] = malloc(sizeof(ConvLayerParams));
((ConvLayerParams *)params[i])
->initializeValues(cudnn_handle, user_params, this->data_type,
batch_size, this->tensor_format, data_type_size,
current_output_size, update_rule);
} else if (layers[i].type == FULLY_CONNECTED) {
FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
params[i] = malloc(sizeof(FCLayerParams));
((FCLayerParams *)params[i])
->initializeValues(user_params, batch_size, this->tensor_format,
this->data_type, current_output_size, update_rule);
} else if (layers[i].type == DROPOUT) {
DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
params[i] = malloc(sizeof(DropoutLayerParams));
((DropoutLayerParams *)params[i])
->initializeValues(cudnn_handle, user_params, this->data_type,
batch_size, this->tensor_format,
current_output_size);
}
else if (layers[i].type == BATCHNORM) {
BatchNormDescriptor *user_params =
(BatchNormDescriptor *)layers[i].params;
params[i] = malloc(sizeof(BatchNormLayerParams));
((BatchNormLayerParams *)params[i])
->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size, update_rule);
} else if (layers[i].type == POOLING) {
PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
      params[i] = malloc(sizeof(PoolingLayerParams));
((PoolingLayerParams *)params[i])
->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
}
else if (layers[i].type == ACTV) {
ActivationDescriptor *user_params =
(ActivationDescriptor *)layers[i].params;
params[i] = malloc(sizeof(ActivationLayerParams));
((ActivationLayerParams *)params[i])
->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
}
else if (layers[i].type == SOFTMAX) {
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
params[i] = malloc(sizeof(SoftmaxLayerParams));
((SoftmaxLayerParams *)params[i])
->initializeValues(user_params, this->data_type, this->tensor_format,
batch_size, current_output_size);
// std::cout << current_output_size.N << ' ' << current_output_size.C <<
// current_output_size.H << current_output_size.W << std::endl;
}
if (i == 0) {
prev_output_size = current_output_size;
}
// incomplete - have to check flatten and check exact dimension
// else if (current_output_size.getTotalSize() !=
// prev_output_size.getTotalSize()) {
// std::cout << "Layer " << i << " output and next layer's input size
// mismatch\n";
// exit(0);
// }
}
// ---------------------- vDNN start ----------------------
// allocate space in host memory for layers to be transferred
h_layer_input = (void **)malloc(num_layers * sizeof(void *));
to_offload = (bool *)malloc(num_layers * sizeof(bool));
prefetched = (bool *)malloc(num_layers * sizeof(bool));
// ---------------------- vDNN end ------------------------
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
std::cout << "Free bytes just before allocate space: " << free_bytes
<< std::endl;
// allocate space for parameters
// Exception BatchNorm - looks like it will take lots of space if only FC
// layers - space taken = size of one input
for (int i = 0; i < num_layers; i++) {
size_t input_size;
if (layers[i].type == CONV) {
ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params;
((ConvLayerParams *)params[i])
->allocateSpace(curand_gen, this->data_type, data_type_size,
init_std_dev, free_bytes, pre_alloc_conv_derivative);
input_size = batch_size * user_params->input_channels *
user_params->input_h * user_params->input_w;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = user_params->input_h;
input_w = user_params->input_w;
}
} else if (layers[i].type == FULLY_CONNECTED) {
FCDescriptor *user_params = (FCDescriptor *)layers[i].params;
((FCLayerParams *)params[i])
->allocateSpace(curand_gen, this->data_type, data_type_size,
init_std_dev, free_bytes, pre_alloc_fc_derivative);
input_size = batch_size * user_params->input_channels;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = 1;
input_w = 1;
}
} else if (layers[i].type == DROPOUT) {
DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params;
((DropoutLayerParams *)params[i])
->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed);
input_size =
batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
} else if (layers[i].type == BATCHNORM) {
BatchNormDescriptor *user_params =
(BatchNormDescriptor *)layers[i].params;
((BatchNormLayerParams *)params[i])
->allocateSpace(this->data_type, data_type_size, free_bytes,
pre_alloc_batch_norm_derivative);
input_size =
batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
} else if (layers[i].type == POOLING) {
PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params;
((PoolingLayerParams *)params[i])->allocateSpace(free_bytes);
input_size = batch_size * user_params->input_channels *
user_params->input_h * user_params->input_w;
if (i == 0) {
input_channels = user_params->input_channels;
input_h = user_params->input_h;
input_w = user_params->input_w;
}
} else if (layers[i].type == ACTV) {
ActivationDescriptor *user_params =
(ActivationDescriptor *)layers[i].params;
((ActivationLayerParams *)params[i])->allocateSpace(free_bytes);
input_size =
batch_size * user_params->channels * user_params->h * user_params->w;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
} else if (layers[i].type == SOFTMAX) {
SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params;
((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes);
input_size =
batch_size * user_params->channels * user_params->h * user_params->w;
// assuming this is last layer, allocate for next layer as well
// checkCudaErrors(cudaMalloc(&layer_input[i + 1], input_size *
// data_type_size));
// checkCudaErrors(cudaMalloc(&dlayer_input[i + 1], input_size *
// data_type_size));
layer_input_size[i + 1] = input_size;
if (i == 0) {
input_channels = user_params->channels;
input_h = user_params->h;
input_w = user_params->w;
}
if (i == num_layers - 1) {
num_classes = user_params->channels;
}
}
// do not allocate memory initially
// checkCudaErrors(cudaMalloc(&layer_input[i], input_size *
// data_type_size));
// checkCudaErrors(cudaMalloc(&dlayer_input[i], input_size *
// data_type_size));
// ---------------------- vDNN start ----------------------
layer_input_size[i] = input_size;
// ---------------------- vDNN end ------------------------
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
std::cout << "Free bytes just after allocate space: " << free_bytes
<< std::endl;
// very small - could be allocated initially itself
checkCudaErrors(cudaMalloc((void **)&y, batch_size * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&pred_y, batch_size * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&loss, batch_size * sizeof(float)));
checkCudaErrors(cudaMalloc(&one_vec, batch_size * data_type_size));
if (this->data_type == CUDNN_DATA_FLOAT)
fillValue<float><<<ceil(1.0 * batch_size / BW), BW>>>((float *)one_vec,
batch_size, 1);
else
fillValue<double><<<ceil(1.0 * batch_size / BW), BW>>>((double *)one_vec,
batch_size, 1);
checkCudaErrors(cudaMallocHost((void **)&h_loss, batch_size * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&h_pred_y, batch_size * sizeof(int)));
// do not allocate workspace initially
// allocate space for workspace and also keep track of algo
// size_t cur_workspace_size;
// workspace_size = 0;
// for (int i = 0; i < num_layers; i++) {
// if (layers[i].type == CONV) {
// ((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size,
// free_bytes);
// if (cur_workspace_size > workspace_size)
// workspace_size = cur_workspace_size;
// }
// }
// checkCudaErrors(cudaMalloc(&workspace, workspace_size));
// free_bytes = free_bytes - workspace_size;
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes));
// leave 600 MB and use the rest
std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN start ----------------------
size_t exp_max_consume, max_consume;
vDNNOptimize(exp_max_consume, max_consume);
std::cout << "actual_max_consume: " << max_consume << std::endl;
std::cout << "exp_max_consume: " << exp_max_consume << std::endl;
std::cout << "diff_max_consume(MB): "
<< (max_consume - exp_max_consume) / (1.0 * 1024 * 1024)
<< std::endl;
std::cout << "exp_free_bytes(MB): "
<< (free_bytes - exp_max_consume) / (1.0 * 1024 * 1024)
<< std::endl;
std::cout << "exp_total_consume(MB): "
<< (init_free_bytes - (free_bytes - exp_max_consume)) /
(1.0 * 1024 * 1024)
<< std::endl;
std::cout << "actual_total_consume(MB): "
<< (init_free_bytes - (free_bytes - max_consume)) /
(1.0 * 1024 * 1024)
<< std::endl;
// ---------------------- vDNN end ------------------------
// remove later
exit(0);
// ---------------------- vDNN start ----------------------
free_bytes = max_consume;
cnmemDevice_t cnmem_device;
size_t cnmem_stream_memory_size = free_bytes;
cnmem_device.device = 0;
cnmem_device.size = cnmem_stream_memory_size;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
// do not allow call to cudaMalloc
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
// ---------------------- vDNN end ------------------------
// ---------------------- vDNN start ----------------------
for (int i = 0; i < num_layers; i++) {
std::cerr << "to_offload[i] " << to_offload[i] << std::endl;
}
for (int i = 0; i < num_layers; i++) {
// allocate pinned memory in host
if (to_offload[i])
checkCudaErrors(cudaMallocHost(&h_layer_input[i],
layer_input_size[i] * data_type_size));
}
// ---------------------- vDNN end ------------------------
checkCudaErrors(cudaDeviceSynchronize());
size_t temp_free_bytes;
checkCudaErrors(cudaMemGetInfo(&temp_free_bytes, &total_bytes));
std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes
<< std::endl;
// {
// int n;
// std::cout << "waiting..\n";
// std::cin >> n;
// }
// data of time
checkCudaErrors(cudaEventCreate(&start_compute));
checkCudaErrors(cudaEventCreate(&stop_compute));
checkCudaErrors(cudaEventCreate(&start_transfer));
checkCudaErrors(cudaEventCreate(&stop_transfer));
}
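// Dry-runs one forward and backward pass using only a byte counter
// (CnmemSpace) - no real allocations - to decide whether the current offload
// set and conv-algorithm preference fit in free_bytes, recording the peak
// requirement in exp_max_consume. The estimate is then cross-checked against
// the real cnmem allocator by simulateCNMEMMemory().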
bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref,
bool hard, size_t &exp_max_consume,
size_t &max_consume) {
CnmemSpace space_tracker(free_bytes);
max_consume = 0;
// forward pass
// allocate space for 1st input
std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed()
<< std::endl;
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[0] * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating input(MB): "
<< space_tracker.getConsumed() << std::endl;
std::cerr << "Forward pass" << std::endl;
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == SOFTMAX) break;
std::cerr << "Processing layer " << i << std::endl;
std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed()
<< std::endl;
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[i + 1] * data_type_size);
std::cerr << "Used space after output allocation(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
size_t cur_workspace_size;
checkWORKSPACE(cur_params->getWorkspaceSize(
space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard,
cur_workspace_size));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
space_tracker.updateMaxConsume(max_consume);
if (!space_tracker.isAvailable()) return false;
std::cerr << "Used space after workspace allocation(MB): "
<< space_tracker.getConsumed() << std::endl;
// current layer computation over, deallocate workspace
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
std::cerr << "Used space after workspace deallocation(MB): "
<< space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable()) return false;
// deallocate layer input
if (to_offload[i]) {
std::cerr << "deallocating input to " << i << std::endl;
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
std::cerr << "Used space after deallocating input(MB): "
<< space_tracker.getConsumed() << std::endl;
}
}
std::cerr << "Backward pass" << std::endl;
if (batch_size * num_classes * data_type_size !=
layer_input_size[num_layers] * data_type_size) {
std::cout << "Panic!! Using wrong size\n";
exit(0);
}
// backward pass
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[num_layers] * data_type_size);
std::cerr << "Used space after allocating final derivative(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
// std::cerr << "max_consume: " << max_consume << std::endl;
for (int i = num_layers - 1; i >= 0; i--) {
// allocate space for previous layer derivative
std::cerr << "Processing layer " << i << std::endl;
std::cerr << "Used space initial(MB): " << space_tracker.getConsumed()
<< std::endl;
if (i > 0) {
if (layer_type[i] == SOFTMAX)
continue;
else {
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[i] * data_type_size);
std::cerr << "Used space after allocating prev. derivative(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
}
// std::cerr << "max_consume: " << max_consume << std::endl;
}
int layer_to_prefetch = findPrefetchLayer(i);
// if layer to be prefetched, allocate space for that layer
if (layer_to_prefetch != -1) {
std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl;
space_tracker.updateSpace(
CnmemSpace::SUB,
layer_input_size[layer_to_prefetch] * data_type_size);
std::cerr << "Used space after allocating prefetch(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
}
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
size_t cur_filter_workspace_size;
checkWORKSPACE(cur_params->getWorkspaceSize(
space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref,
hard, cur_filter_workspace_size));
size_t cur_data_workspace_size = 0;
if (i > 0)
checkWORKSPACE(cur_params->getWorkspaceSize(
space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref,
hard, cur_data_workspace_size));
size_t cur_workspace_size =
(cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
std::cerr << "Used space after allocating workspace(MB): "
<< space_tracker.getConsumed() << std::endl;
space_tracker.updateMaxConsume(max_consume);
if (!pre_alloc_conv_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->C_out * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
// std::cerr << "max_consume: " << max_consume << std::endl;
if (!space_tracker.isAvailable()) return false;
// current layer computation over, deallocate workspace
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
std::cerr << "Used space after deallocating workspace(MB): "
<< space_tracker.getConsumed() << std::endl;
if (!pre_alloc_conv_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->C_out * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
} else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
space_tracker.updateSpace(
CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->C_out * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable()) return false;
if (!pre_alloc_fc_derivative) {
space_tracker.updateSpace(
CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->C_out * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
} else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->allocation_size * data_type_size);
space_tracker.updateMaxConsume(max_consume);
std::cerr << "Used space after allocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
if (!space_tracker.isAvailable()) return false;
if (!pre_alloc_batch_norm_derivative) {
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->allocation_size * data_type_size);
std::cerr << "Used space after deallocating weight derv.(MB): "
<< space_tracker.getConsumed() << std::endl;
}
}
if (!space_tracker.isAvailable()) return false;
// deallocate layer output and derivative
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i + 1] * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i + 1] * data_type_size);
std::cerr << "Used space after deallocating output, derivative(MB): "
<< space_tracker.getConsumed() << std::endl;
// if 1st layer, deallocate input layer also
if (i == 0) {
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
std::cerr << "Used space after deallocating input(MB): "
<< space_tracker.getConsumed() << std::endl;
}
}
if (space_tracker.getConsumed() > 0) std::cerr << "Panic!! more free bytes\n";
if (space_tracker.getConsumed() != 0)
std::cerr << "Panic!! bytes not freed properly\n";
// return true;
exp_max_consume = max_consume;
// check with cnmem once
bool ret_val = simulateCNMEMMemory(max_consume);
return ret_val;
}
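// Replays the allocation/free pattern of one iteration against an actual cnmem
// pool of size max_consume, retrying with a larger budget whenever an
// allocation fails, and dumps the pool state after every step to a .dat file.
// Returns true if a budget smaller than the available free bytes suffices.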
bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) {
size_t init_max_consume = max_consume;
cnmemDevice_t cnmem_device;
size_t t;
checkCudaErrors(cudaMemGetInfo(&free_bytes, &t));
std::cout << "free_bytes: " << free_bytes << std::endl;
  free_bytes -= 100 * 1024 * 1024;  // keep ~100 MB of headroom out of the simulated budget
cnmem_device.device = 0;
cnmem_device.numStreams = 0;
cnmem_device.streams = NULL;
cnmem_device.streamSizes = NULL;
std::string cnmem_memory_state_filename;
if (vdnn_type == vDNN_ALL) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_p.dat";
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_all_m.dat";
}
} else if (vdnn_type == vDNN_CONV) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_p.dat";
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
cnmem_memory_state_filename = "cnmem_conv_m.dat";
}
} else if (vdnn_type == vDNN_DYN) {
cnmem_memory_state_filename = "cnmem_dyn.dat";
} else {
cnmem_memory_state_filename = "cnmem_unknown.dat";
}
FILE *cnmem_memory_state_fptr =
fopen(cnmem_memory_state_filename.c_str(), "w");
size_t run_count = 0;
bool out_of_memory = false;
while (true) {
run_count++;
if (max_consume >= free_bytes) break;
out_of_memory = false;
cnmem_device.size = max_consume;
std::cerr << run_count << ' ' << max_consume << std::endl;
if (max_consume > free_bytes)
std::cerr << "panic!! max_consume > free_bytes\n";
checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW));
resetPrefetched();
fprintf(
cnmem_memory_state_fptr,
"//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "run_count: %lu\n", run_count);
fprintf(cnmem_memory_state_fptr, "max_consume: %lu\n", max_consume);
fprintf(
cnmem_memory_state_fptr,
"//////////////////////////////////////////////////////////////////\n");
fprintf(cnmem_memory_state_fptr, "initial state\n");
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
checkCNMEMSim(cnmemMalloc(&layer_input[0],
layer_input_size[0] * data_type_size, NULL),
layer_input_size[0] * data_type_size, max_consume, free_bytes,
checkCNMEM(cnmemFinalize());
continue, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. layer_input[%d] - size: %lu\n", 0,
layer_input_size[0] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
// forward propagate
for (int i = 0; i < num_layers; i++) {
size_t cur_workspace_size;
void *cur_workspace;
checkCNMEMSim(cnmemMalloc(&layer_input[i + 1],
layer_input_size[i + 1] * data_type_size, NULL),
layer_input_size[i + 1] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. layer_input[%d] - size: %lu\n", i + 1,
layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break,
out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. conv. workspace - size: %lu\n",
cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL), cur_workspace_size,
max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (to_offload[i]) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free layer_input[%d] - size: %lu\n", i,
layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
}
if (out_of_memory) {
checkCNMEM(cnmemFinalize());
if (max_consume < free_bytes)
continue;
else
break;
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[num_layers],
batch_size * num_classes * data_type_size, NULL),
layer_input_size[num_layers] * data_type_size, max_consume,
free_bytes, checkCNMEM(cnmemFinalize());
continue, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. dlayer_input[%d] - size: %lu\n", num_layers,
layer_input_size[num_layers] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size,
cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
} else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEMSim(
cnmemMalloc(
&layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size, NULL),
layer_input_size[layer_to_prefetch] * data_type_size,
max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. prefetch layer_input[%d] - size: %lu\n",
layer_to_prefetch,
layer_input_size[layer_to_prefetch] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
checkCNMEMSim(cnmemMalloc(&dlayer_input[i],
layer_input_size[i] * data_type_size, NULL),
layer_input_size[i] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. dlayer_input[%d] - size: %lu\n", i,
layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(
data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n",
cur_params->kernel_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n",
cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
cur_workspace_size =
(cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL),
cur_workspace_size, max_consume, free_bytes, break,
out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after alloc. conv. workspace - size: %lu\n",
cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(
data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n",
cur_params->weight_matrix_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n",
cur_params->C_out * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
if (!cur_params->cnmemAllocDerivativesCheck(
data_type_size, NULL, max_consume, free_bytes, out_of_memory))
break;
fprintf(cnmem_memory_state_fptr, "after alloc. dscale - size: %lu\n",
cur_params->allocation_size * data_type_size);
fprintf(cnmem_memory_state_fptr, "after alloc. dbias - size: %lu\n",
cur_params->allocation_size * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
continue;
}
if (layer_type[i] == CONV) {
checkCNMEMSim(cnmemFree(cur_workspace, NULL), cur_workspace_size,
max_consume, free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free conv. workspace - size: %lu\n", cur_workspace_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n",
(long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
} else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n",
(long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
} else if (layer_type[i] == BATCHNORM) {
if (!pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n",
(long unsigned)0);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEMSim(cnmemFree(layer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free layer_input[%d] - size: %lu\n", i + 1,
layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
checkCNMEMSim(cnmemFree(dlayer_input[i + 1], NULL),
layer_input_size[i + 1] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free dlayer_input[%d] - size: %lu\n", i + 1,
layer_input_size[i + 1] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
if (i == 0) {
checkCNMEMSim(cnmemFree(layer_input[i], NULL),
layer_input_size[i] * data_type_size, max_consume,
free_bytes, break, out_of_memory);
fprintf(cnmem_memory_state_fptr,
"after free layer_input[%d] - size: %lu\n", i,
layer_input_size[i] * data_type_size);
cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL);
}
}
checkCNMEM(cnmemFinalize());
if (out_of_memory) {
if (max_consume < free_bytes)
continue;
else
break;
}
break;
}
free_bytes += 100 * 1024 * 1024;
if (max_consume < free_bytes) {
double exp_size =
(init_max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
double act_size =
(max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024);
fprintf(cnmem_memory_state_fptr, "expected_memory_consume: %f MB\n",
exp_size);
fprintf(cnmem_memory_state_fptr, "actual_memory_consume: %f MB\n",
act_size);
} else {
fprintf(cnmem_memory_state_fptr, "out of memory\n");
}
fclose(cnmem_memory_state_fptr);
if (max_consume < free_bytes)
return true;
else
return false;
}
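// Pick the offload set and convolution algorithm preference. For vDNN_ALL,
// vDNN_CONV and vDNN_NONE the configuration is fixed by the user and only
// checked for feasibility; vDNN_DYN first verifies trainability with all
// offload and memory-optimal algorithms, then tries configurations from
// fastest to most memory-frugal and keeps the first one whose simulation
// succeeds.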
void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) {
bool hard = true, soft = false;
// if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available
if (vdnn_type == vDNN_ALL) {
setOffload(OFFLOAD_ALL);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
}
return;
} else if (vdnn_type == vDNN_CONV) {
setOffload(OFFLOAD_CONV);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
}
return;
} else if (vdnn_type == vDNN_NONE) {
setOffload(OFFLOAD_NONE);
resetPrefetched();
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
} else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
}
return;
}
if (vdnn_type == vDNN_DYN) {
// check for trainability
std::cerr << "vDNN_DYN\n";
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume))
outOfMemory();
    // check if it works with the fastest algo and no offload; if so, select it
    // and return
setOffload(NeuralNet::OFFLOAD_NONE);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n";
return;
}
    // check if conv offload with the fastest algo works, then whether all
    // offload with the fastest algo works
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n";
return;
}
    // greedy approach: reduce memory usage while keeping performance where possible
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft,
exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft,
exp_max_consume, max_consume)) {
std::cerr << "Choosing GREEDY, ALL OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_CONV);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n";
return;
}
setOffload(NeuralNet::OFFLOAD_ALL);
resetPrefetched();
if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard,
exp_max_consume, max_consume)) {
std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n";
return;
}
}
exit(0);
}
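// Mark which layer outputs are offloaded to host memory during the forward
// pass. ACTV and SOFTMAX layers are never offloaded, and the last
// non-SOFTMAX/ACTV layer is always kept resident on the device.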
void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) {
if (offload_type == OFFLOAD_NONE) {
for (int i = 0; i < num_layers; i++) to_offload[i] = false;
} else if (offload_type == OFFLOAD_CONV) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == CONV)
to_offload[i] = true;
else
to_offload[i] = false;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
} else if (offload_type == OFFLOAD_ALL) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX)
to_offload[i] = false;
else
to_offload[i] = true;
}
// set last non SOFTMAX/ACTV layer to no_offload
for (int i = num_layers - 1; i >= 0; i--) {
if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV)
;
else {
to_offload[i] = false;
break;
}
}
}
}
void NeuralNet::resetPrefetched() {
for (int i = 0; i < num_layers; i++) prefetched[i] = false;
}
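// Convenience overload: runs a full forward/backward step, discarding the
// per-layer vDNN offload/prefetch lag timings.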
void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train,
int *correct_count, float *loss) {
std::vector<float> t1, t2;
this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss);
}
void NeuralNet::getLoss(void *X, int *y, double learning_rate,
std::vector<float> &fwd_vdnn_lag,
std::vector<float> &bwd_vdnn_lag, bool train,
int *correct_count, float *scalar_loss) {
CnmemSpace space_tracker(free_bytes);
// std::cout << "here\n";
// std::cout << "Free bytes: " << free_bytes << std::endl;
for (int i = 0; i < num_layers; i++) prefetched[i] = false;
checkCNMEM(
cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[0] * data_type_size);
checkCudaErrors(cudaMemcpy(
layer_input[0], X,
batch_size * input_channels * input_h * input_w * data_type_size,
cudaMemcpyHostToDevice));
if (train == true) {
checkCudaErrors(cudaMemcpy(this->y, y, batch_size * data_type_size,
cudaMemcpyHostToDevice));
}
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
// forward propagate
for (int i = 0; i < num_layers; i++) {
if (train == false && i == num_layers - 1) break;
// ---------------------- vDNN start ----------------------
size_t cur_workspace_size;
void *cur_workspace;
// offload if required
if (i > 0 && to_offload[i] && train == true)
checkCudaErrors(cudaMemcpyAsync(h_layer_input[i], layer_input[i],
layer_input_size[i] * data_type_size,
cudaMemcpyDeviceToHost, stream_memory));
checkCNMEM(cnmemMalloc(&layer_input[i + 1],
layer_input_size[i + 1] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[i + 1] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
// std::cout << "here" << i << std::endl;
if (layer_type[i] == CONV) {
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
// computation
checkCUDNN(cudnnConvolutionForward(
cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i],
cur_params->filter_desc, cur_params->W, cur_params->conv_desc,
cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->output_tensor, layer_input[i + 1]));
checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc,
cur_params->b, &alpha,
cur_params->output_tensor, layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, layer_input[i + 1]));
}
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
else if (layer_type[i] == FULLY_CONNECTED) {
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)params[i];
// std::cout << "FChere" << i << std::endl;
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(cublasSgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W,
cur_params->C_out, (float *)layer_input[i], cur_params->C_in,
&Sbeta, (float *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasSgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out,
(float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1],
cur_params->C_out));
} else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W,
cur_params->C_out, (double *)layer_input[i], cur_params->C_in,
&Dbeta, (double *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasDgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out,
(double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1],
cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, layer_input[i + 1]));
}
// std::cout << "FChere" << i << std::endl;
} else if (layer_type[i] == DROPOUT) {
// std::cout << "Dropout\n";
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutForward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
layer_input[i], cur_params->input_tensor, layer_input[i + 1],
cur_params->reserved_space, cur_params->reserved_space_size));
} else if (layer_type[i] == BATCHNORM) {
// std::cout << "Batchnorm\n";
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (train == true) {
checkCUDNN(cudnnBatchNormalizationForwardTraining(
cudnn_handle, cur_params->mode, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale,
cur_params->bias, cur_params->factor, cur_params->running_mean,
cur_params->running_variance, cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
} else {
checkCUDNN(cudnnBatchNormalizationForwardInference(
cudnn_handle, cur_params->mode, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale,
cur_params->bias, cur_params->running_mean,
cur_params->running_variance, cur_params->epsilon));
}
} else if (layer_type[i] == POOLING) {
// std::cout << "Pooling\n";
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(
cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->output_tensor, layer_input[i + 1]));
} else if (layer_type[i] == ACTV) {
// std::cout << "Actv\n";
std::cout << "Panic!! ACTV wrong place\n";
exit(0);
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor,
layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1]));
} else if (layer_type[i] == SOFTMAX) {
// std::cout << "Softmax\n";
std::cout << "Panic!! SOFTMAX wrong place\n";
exit(0);
if (train == true) {
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, layer_input[i + 1]));
}
}
// ---------------------- vDNN start ----------------------
// synchronization
// checkCudaErrors(cudaDeviceSynchronize());
// if next layer is ACTV or SOFTMAX, complete that and come to
// synchronization
// the case in above if for ACTV and SOFTMAX never occurs
if (layer_type[i + 1] == SOFTMAX) {
i++;
if (train == true) {
layer_input[i + 1] = layer_input[i];
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, layer_input[i + 1]));
}
i--;
}
struct timespec start_time, end_time;
checkCudaErrors(cudaStreamSynchronize(stream_compute));
if (train) clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(cudaStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 +
(end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
fwd_vdnn_lag.push_back(lag);
}
// std::cout << "EndSynchere" << i << std::endl;
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
}
if (to_offload[i] && train == true) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
}
if (train == false) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
}
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
// std::cout << "EndSynchere" << i << std::endl;
// ---------------------- vDNN end ------------------------
}
// std::cout << "here" << std::endl;
if (train == false) {
compareOutputCorrect(correct_count, y);
checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL));
space_tracker.updateSpace(
CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size);
return;
}
*scalar_loss = computeLoss();
// ---------------------- vDNN start ----------------------
checkCNMEM(cnmemMalloc(&dlayer_input[num_layers],
batch_size * num_classes * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[num_layers] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// ---------------------- vDNN end ------------------------
if (layer_type[num_layers - 1] == SOFTMAX) {
// SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers
// - 1];
if (data_type == CUDNN_DATA_FLOAT) {
checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0,
batch_size * num_classes * sizeof(float)));
softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW), BW>>>(
this->y, (float *)layer_input[num_layers],
(float *)dlayer_input[num_layers], batch_size, num_classes,
softmax_eps);
} else if (data_type == CUDNN_DATA_DOUBLE) {
checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0,
batch_size * num_classes * sizeof(double)));
softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW), BW>>>(
this->y, (double *)layer_input[num_layers],
(double *)dlayer_input[num_layers], batch_size, num_classes,
softmax_eps);
}
}
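  // Backward pass: walk the layers in reverse. For each layer, prefetch the
  // next offloaded input back to the device on stream_memory, allocate the
  // derivative buffers on demand, run the cuDNN/cuBLAS backward calls, apply
  // the parameter update, and free the buffers that are no longer needed.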
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size,
cur_workspace_size;
void *cur_workspace;
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
} else {
int layer_to_prefetch = findPrefetchLayer(i);
if (layer_to_prefetch != -1) {
checkCNMEM(cnmemMalloc(
&layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size, NULL));
space_tracker.updateSpace(
CnmemSpace::SUB,
layer_input_size[layer_to_prefetch] * data_type_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
if (layer_to_prefetch != 0) {
checkCudaErrors(cudaMemcpyAsync(
layer_input[layer_to_prefetch],
h_layer_input[layer_to_prefetch],
layer_input_size[layer_to_prefetch] * data_type_size,
cudaMemcpyHostToDevice, stream_memory));
} else {
// std::cout << "transfer here\n";
checkCudaErrors(cudaMemcpyAsync(
layer_input[layer_to_prefetch], X,
layer_input_size[layer_to_prefetch] * data_type_size,
cudaMemcpyHostToDevice, stream_memory));
// std::cout << "transfer here\n";
}
}
checkCNMEM(cnmemMalloc(&dlayer_input[i],
layer_input_size[i] * data_type_size, NULL));
space_tracker.updateSpace(CnmemSpace::SUB,
layer_input_size[i] * data_type_size);
}
// std::cout << "Free bytes: " << free_bytes << std::endl;
}
// ---------------------- vDNN end ------------------------
if (layer_type[i] == CONV) {
// std::cout << "here\n";
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->C_out * data_type_size);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size <<
// std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
checkCUDNN(cudnnConvolutionBackwardBias(
cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1],
&beta, cur_params->bias_desc, cur_params->db));
// std::cout << "neural_net: backward conv i:" << i << std::endl;
checkCUDNN(cudnnConvolutionBackwardFilter(
cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i],
cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc,
cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->filter_desc, cur_params->dW));
if (i > 0)
checkCUDNN(cudnnConvolutionBackwardData(
cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W,
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace,
cur_workspace_size, &beta, cur_params->input_tensor,
dlayer_input[i]));
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
// std::cout << "here\n";
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
if (!pre_alloc_fc_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(
CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->C_out * data_type_size);
}
if (data_type == CUDNN_DATA_FLOAT) {
// bias backward
checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, 1, batch_size, &Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)one_vec, batch_size, &Sbeta,
(float *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(cublasSgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, cur_params->C_out,
cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1],
cur_params->C_out, (float *)layer_input[i], cur_params->C_in,
&Sbeta, (float *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(
cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Salpha, (float *)cur_params->W, cur_params->C_out,
(float *)dlayer_input[i + 1], cur_params->C_out,
&Sbeta, (float *)dlayer_input[i], cur_params->C_in));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
// bias backward
checkCUBLAS(cublasDgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, 1,
batch_size, &Dalpha, (double *)dlayer_input[i + 1],
cur_params->C_out, (double *)one_vec, batch_size, &Dbeta,
(double *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Dalpha, (double *)dlayer_input[i + 1],
cur_params->C_out, (double *)layer_input[i],
cur_params->C_in, &Dbeta,
(double *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(
cublasDgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Dalpha, (double *)cur_params->W, cur_params->C_out,
(double *)dlayer_input[i + 1], cur_params->C_out,
&Dbeta, (double *)dlayer_input[i], cur_params->C_in));
}
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutBackward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i],
cur_params->reserved_space, cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::SUB,
cur_params->allocation_size * data_type_size);
}
checkCUDNN(cudnnBatchNormalizationBackward(
cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i],
cur_params->sbmv_desc, cur_params->scale, cur_params->dscale,
cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean,
cur_params->result_save_inv_var));
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(
cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, dlayer_input[i]));
}
else if (layer_type[i] == ACTV) {
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor,
layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, dlayer_input[i]));
continue;
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxBackward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1], &beta,
cur_params->input_tensor, dlayer_input[i]));
// std::cout << "compute here\n";
continue;
}
// ---------------------- vDNN start ----------------------
// checkCudaErrors(cudaDeviceSynchronize());
struct timespec start_time, end_time;
checkCudaErrors(cudaStreamSynchronize(stream_compute));
if (train) clock_gettime(CLOCK_MONOTONIC, &start_time);
checkCudaErrors(cudaStreamSynchronize(stream_memory));
if (train) {
clock_gettime(CLOCK_MONOTONIC, &end_time);
float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 +
(end_time.tv_nsec - start_time.tv_nsec) * 1e-6;
bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag);
}
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->kernel_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->C_out * data_type_size);
}
} else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(
CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->C_out * data_type_size);
}
} else if (layer_type[i] == BATCHNORM) {
if (train == true and !pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->allocation_size * data_type_size);
space_tracker.updateSpace(CnmemSpace::ADD,
cur_params->allocation_size * data_type_size);
}
}
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i + 1] * data_type_size);
checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i + 1] * data_type_size);
if (i == 0) {
checkCNMEM(cnmemFree(layer_input[i], NULL));
space_tracker.updateSpace(CnmemSpace::ADD,
layer_input_size[i] * data_type_size);
}
// ---------------------- vDNN end ------------------------
}
if (space_tracker.getConsumed() != 0) {
std::cout << "Panic!! Space not updated properly\n";
}
// exit(0);
}
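// Return the closest preceding offloaded layer that has not been prefetched
// yet (marking it as prefetched), or -1 if a CONV layer is reached first.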
int NeuralNet::findPrefetchLayer(int cur_layer) {
for (int i = cur_layer - 1; i >= 0; i--) {
if (to_offload[i] && !prefetched[i]) {
prefetched[i] = true;
return i;
} else if (layer_type[i] == CONV) {
return -1;
}
}
return -1;
}
|
ec5acd4ebd1fed3076ee5901a55da8d42b1fcc74.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#define MODID pre
#include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
#include "gradops_cdf.cuh"
#include "dervfields_cdf.cuh"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
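// Each thread handles an npgp[0] x npgp[1] patch of grid cells: the order-0
// state is copied into the next slot of wmod, the derived-field helpers
// (computepk/computept/computebdotv/computedivb/computec) are evaluated per
// cell, and finally thread 0 scans the interior to update p->cmax.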
__global__ void computedervfields_parallel(struct params *p, real *w, real *wmod,
real *dwn1, real *wd, int order,int ordero)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ip,jp,ipg,jpg;
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
if(order == 0)
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if(i<((p->n[0])) && j<((p->n[1])))
{
for(int f=rho; f<=b2; f++)
wmod[fencode_cdf(p,i,j,f)+((p->n[0]))*((p->n[1]))*NVAR]=wmod[fencode_cdf(p,i,j,f)];
}
__syncthreads();
}
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if(i<((p->n[0])) && j<((p->n[1])))
{
for(int f=vel1; f<NDERV; f++)
;// wd[fencode_cdf(p,i,j,f)]=0;
for(int f=rho; f<NVAR; f++)
;// dwn1[fencode_cdf(p,i,j,f)]=0;
}
__syncthreads();
}
//if(i>20 && j >20 && i<90 && j<90)
// {
// computepk_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
// computept_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
//}
// __syncthreads();
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
#ifdef USE_VAC
if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
computej_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
#endif
#ifdef USE_SAC
if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
;// computej_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
#endif
__syncthreads();
}
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if(i<((p->n[0])) && j<((p->n[1])))
{
#ifdef ADIABHYDRO
computepk_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computept_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
#else
//computej_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computepk_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computept_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computebdotv_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computedivb_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
#endif
}
__syncthreads();
}
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if(i<((p->n[0])) && j<((p->n[1])))
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
      //determine cmax
computec_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
}
__syncthreads();
}
if(iindex==0)
{
// for(ipg=0;ipg<(p->npgp[0]);ipg++)
// for(jpg=0;jpg<(p->npgp[1]);jpg++)
// {
// i=ip*(p->npgp[0])+ipg;
// j=jp*(p->npgp[1])+jpg;
//if( i<((p->n[0])) && j<((p->n[1])))
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
p->cmax=0.0;
     // scan interior cells to accumulate p->cmax
     for(i=2;i<((p->n[0])-2);i++)
     for(j=2;j<((p->n[1])-2);j++)
{
computecmax_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
}
// }
}
__syncthreads();
/*if(i<(p->n[0]) && j<(p->n[1]))
{
// for(int f=vel1; f<NDERV; f++)
for(int f=current1; f<=current2; f++)
bc_cont_cdf(wd,p,i,j,f);
}
__syncthreads();*/
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cdf(char *label)
{
// we need to synchronise first to catch errors due to
  // asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cucomputedervfields(struct params **p, real **w, struct params **d_p, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero)
{
//printf("calling propagate solution\n");
//dim3 dimBlock(blocksize, blocksize);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
//__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod,
// real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd)
//init_parallel(struct params *p, real *b, real *u, real *v, real *h)
hipLaunchKernelGGL(( computedervfields_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero);
//prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h);
//printf("called prop\n");
hipDeviceSynchronize();
//boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called boundary\n");
//hipDeviceSynchronize();
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called update\n");
// hipDeviceSynchronize();
hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
//following used for testing to check current soundspeeds etc
//hipMemcpy(*w, *d_wd, 7*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
  return 0;
}
|
ec5acd4ebd1fed3076ee5901a55da8d42b1fcc74.cu
|
//#define MODID pre
#include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
#include "gradops_cdf.cuh"
#include "dervfields_cdf.cuh"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
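// Each thread handles an npgp[0] x npgp[1] patch of grid cells: the order-0
// state is copied into the next slot of wmod, the derived-field helpers
// (computepk/computept/computebdotv/computedivb/computec) are evaluated per
// cell, and finally thread 0 scans the interior to update p->cmax.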
__global__ void computedervfields_parallel(struct params *p, real *w, real *wmod,
real *dwn1, real *wd, int order,int ordero)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
// real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
int ip,jp,ipg,jpg;
jp=iindex/(ni/(p->npgp[0]));
ip=iindex-(jp*(ni/(p->npgp[0])));
if(order == 0)
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if(i<((p->n[0])) && j<((p->n[1])))
{
for(int f=rho; f<=b2; f++)
wmod[fencode_cdf(p,i,j,f)+((p->n[0]))*((p->n[1]))*NVAR]=wmod[fencode_cdf(p,i,j,f)];
}
__syncthreads();
}
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if(i<((p->n[0])) && j<((p->n[1])))
{
for(int f=vel1; f<NDERV; f++)
;// wd[fencode_cdf(p,i,j,f)]=0;
for(int f=rho; f<NVAR; f++)
;// dwn1[fencode_cdf(p,i,j,f)]=0;
}
__syncthreads();
}
//if(i>20 && j >20 && i<90 && j<90)
// {
// computepk_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
// computept_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
//}
// __syncthreads();
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
#ifdef USE_VAC
if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
computej_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
#endif
#ifdef USE_SAC
if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
;// computej_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
#endif
__syncthreads();
}
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if(i<((p->n[0])) && j<((p->n[1])))
{
#ifdef ADIABHYDRO
computepk_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computept_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
#else
//computej_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computepk_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computept_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computebdotv_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
computedivb_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
#endif
}
__syncthreads();
}
for(ipg=0;ipg<(p->npgp[0]);ipg++)
for(jpg=0;jpg<(p->npgp[1]);jpg++)
{
i=ip*(p->npgp[0])+ipg;
j=jp*(p->npgp[1])+jpg;
if(i<((p->n[0])) && j<((p->n[1])))
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
      //determine cmax
computec_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
}
__syncthreads();
}
if(iindex==0)
{
// for(ipg=0;ipg<(p->npgp[0]);ipg++)
// for(jpg=0;jpg<(p->npgp[1]);jpg++)
// {
// i=ip*(p->npgp[0])+ipg;
// j=jp*(p->npgp[1])+jpg;
//if( i<((p->n[0])) && j<((p->n[1])))
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
p->cmax=0.0;
     // scan interior cells to accumulate p->cmax
     for(i=2;i<((p->n[0])-2);i++)
     for(j=2;j<((p->n[1])-2);j++)
{
computecmax_cdf(wmod+(order*((p->n[0]))*((p->n[1]))*NVAR),wd,p,i,j);
}
// }
}
__syncthreads();
/*if(i<(p->n[0]) && j<(p->n[1]))
{
// for(int f=vel1; f<NDERV; f++)
for(int f=current1; f<=current2; f++)
bc_cont_cdf(wd,p,i,j,f);
}
__syncthreads();*/
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cdf(char *label)
{
// we need to synchronise first to catch errors due to
  // asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cucomputedervfields(struct params **p, real **w, struct params **d_p, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero)
{
//printf("calling propagate solution\n");
//dim3 dimBlock(blocksize, blocksize);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
//__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod,
// real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd)
//init_parallel(struct params *p, real *b, real *u, real *v, real *h)
computedervfields_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero);
//prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_b,*d_u,*d_v,*d_h);
//printf("called prop\n");
cudaThreadSynchronize();
//boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called boundary\n");
//cudaThreadSynchronize();
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_b,*d_w,*d_wnew);
//printf("called update\n");
// cudaThreadSynchronize();
cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
//following used for testing to check current soundspeeds etc
//cudaMemcpy(*w, *d_wd, 7*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device");
  return 0;
}
|
2131545bd77bba7b913e3e7d25238ab4027702a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ >= 8
#include "scope/scope.hpp"
#include "args.hpp"
#define NAME "Comm_UM_Prefetch_GPUToGPU"
auto Comm_UM_Prefetch_GPUToGPU = [](benchmark::State &state, const int src_gpu, const int dst_gpu) {
const auto bytes = 1ULL << static_cast<size_t>(state.range(0));
if (PRINT_IF_ERROR(scope::cuda_reset_device(src_gpu))) {
state.SkipWithError(NAME " failed to reset CUDA src device");
return;
}
if (PRINT_IF_ERROR(scope::cuda_reset_device(dst_gpu))) {
state.SkipWithError(NAME " failed to reset CUDA src device");
return;
}
if (PRINT_IF_ERROR(hipSetDevice(dst_gpu))) {
state.SkipWithError(NAME " failed to set CUDA dst device");
return;
}
char *ptr = nullptr;
if (PRINT_IF_ERROR(hipMallocManaged(&ptr, bytes))) {
state.SkipWithError(NAME " failed to perform hipMallocManaged");
return;
}
defer(hipFree(ptr));
if (PRINT_IF_ERROR(hipMemset(ptr, 0, bytes))) {
state.SkipWithError(NAME " failed to perform hipMemset");
return;
}
hipEvent_t start, stop;
if (PRINT_IF_ERROR(hipEventCreate(&start))) {
state.SkipWithError(NAME " failed to create event");
return;
}
defer(hipEventDestroy(start));
if (PRINT_IF_ERROR(hipEventCreate(&stop))) {
state.SkipWithError(NAME " failed to create event");
return;
}
defer(hipEventDestroy(stop));
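  // Each timed iteration first migrates the managed buffer back to src_gpu and
  // synchronizes both devices, then measures only the prefetch to dst_gpu with
  // CUDA events recorded around hipMemPrefetchAsync.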
for (auto _ : state) {
hipMemPrefetchAsync(ptr, bytes, src_gpu);
hipSetDevice(src_gpu);
hipDeviceSynchronize();
hipSetDevice(dst_gpu);
hipDeviceSynchronize();
if (PRINT_IF_ERROR(hipGetLastError())) {
state.SkipWithError(NAME " failed to prep iteration");
return;
}
hipEventRecord(start);
if (PRINT_IF_ERROR(hipMemPrefetchAsync(ptr, bytes, dst_gpu))) {
state.SkipWithError(NAME " failed prefetch");
break;
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float millis = 0;
if (PRINT_IF_ERROR(hipEventElapsedTime(&millis, start, stop))) {
state.SkipWithError(NAME " failed to get elapsed time");
break;
}
state.SetIterationTime(millis / 1000);
}
state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes));
state.counters["bytes"] = bytes;
state.counters["src_gpu"] = src_gpu;
state.counters["dst_gpu"] = dst_gpu;
};
static void registerer() {
const std::vector<Device> cudas = scope::system::cuda_devices();
for (size_t i = 0; i < cudas.size(); ++i) {
for (size_t j = i + 1; j < cudas.size(); ++j) {
auto src_gpu = cudas[i];
auto dst_gpu = cudas[j];
std::string name = std::string(NAME) + "/" + std::to_string(src_gpu) + "/" + std::to_string(dst_gpu);
benchmark::RegisterBenchmark(name.c_str(), Comm_UM_Prefetch_GPUToGPU, src_gpu, dst_gpu)->SMALL_ARGS()->UseManualTime();
}
}
}
SCOPE_AFTER_INIT(registerer, NAME);
#endif // __CUDACC_VER_MAJOR__ >= 8
|
2131545bd77bba7b913e3e7d25238ab4027702a1.cu
|
#if __CUDACC_VER_MAJOR__ >= 8
#include "scope/scope.hpp"
#include "args.hpp"
#define NAME "Comm_UM_Prefetch_GPUToGPU"
auto Comm_UM_Prefetch_GPUToGPU = [](benchmark::State &state, const int src_gpu, const int dst_gpu) {
const auto bytes = 1ULL << static_cast<size_t>(state.range(0));
if (PRINT_IF_ERROR(scope::cuda_reset_device(src_gpu))) {
state.SkipWithError(NAME " failed to reset CUDA src device");
return;
}
if (PRINT_IF_ERROR(scope::cuda_reset_device(dst_gpu))) {
state.SkipWithError(NAME " failed to reset CUDA src device");
return;
}
if (PRINT_IF_ERROR(cudaSetDevice(dst_gpu))) {
state.SkipWithError(NAME " failed to set CUDA dst device");
return;
}
char *ptr = nullptr;
if (PRINT_IF_ERROR(cudaMallocManaged(&ptr, bytes))) {
state.SkipWithError(NAME " failed to perform cudaMallocManaged");
return;
}
defer(cudaFree(ptr));
if (PRINT_IF_ERROR(cudaMemset(ptr, 0, bytes))) {
state.SkipWithError(NAME " failed to perform cudaMemset");
return;
}
cudaEvent_t start, stop;
if (PRINT_IF_ERROR(cudaEventCreate(&start))) {
state.SkipWithError(NAME " failed to create event");
return;
}
defer(cudaEventDestroy(start));
if (PRINT_IF_ERROR(cudaEventCreate(&stop))) {
state.SkipWithError(NAME " failed to create event");
return;
}
defer(cudaEventDestroy(stop));
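  // Each timed iteration first migrates the managed buffer back to src_gpu and
  // synchronizes both devices, then measures only the prefetch to dst_gpu with
  // CUDA events recorded around cudaMemPrefetchAsync.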
for (auto _ : state) {
cudaMemPrefetchAsync(ptr, bytes, src_gpu);
cudaSetDevice(src_gpu);
cudaDeviceSynchronize();
cudaSetDevice(dst_gpu);
cudaDeviceSynchronize();
if (PRINT_IF_ERROR(cudaGetLastError())) {
state.SkipWithError(NAME " failed to prep iteration");
return;
}
cudaEventRecord(start);
if (PRINT_IF_ERROR(cudaMemPrefetchAsync(ptr, bytes, dst_gpu))) {
state.SkipWithError(NAME " failed prefetch");
break;
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float millis = 0;
if (PRINT_IF_ERROR(cudaEventElapsedTime(&millis, start, stop))) {
state.SkipWithError(NAME " failed to get elapsed time");
break;
}
state.SetIterationTime(millis / 1000);
}
state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes));
state.counters["bytes"] = bytes;
state.counters["src_gpu"] = src_gpu;
state.counters["dst_gpu"] = dst_gpu;
};
static void registerer() {
const std::vector<Device> cudas = scope::system::cuda_devices();
for (size_t i = 0; i < cudas.size(); ++i) {
for (size_t j = i + 1; j < cudas.size(); ++j) {
auto src_gpu = cudas[i];
auto dst_gpu = cudas[j];
std::string name = std::string(NAME) + "/" + std::to_string(src_gpu) + "/" + std::to_string(dst_gpu);
benchmark::RegisterBenchmark(name.c_str(), Comm_UM_Prefetch_GPUToGPU, src_gpu, dst_gpu)->SMALL_ARGS()->UseManualTime();
}
}
}
SCOPE_AFTER_INIT(registerer, NAME);
#endif // __CUDACC_VER_MAJOR__ >= 8
|
da05a02c56c35bbe8c2ea57daab9655077e3f30c.hip
|
// !!! This is a file automatically generated by hipify!!!
/* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <hip/hip_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
__global__
void
cuda1DWaveEqnKernel(const unsigned int numberOfNodes, const float cfl2,
const float *oldDisplacements, const float *curDisplacements,
float *newDisplacements, const float left_boundary_value) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Warp divergence needed here: apply boundary condition at x_0^{n+1}
if (tid == 0) {
newDisplacements[tid] = left_boundary_value;
tid += gridDim.x * blockDim.x;
}
// Apply the centered-time, centered-space finite difference method
while (tid <= (numberOfNodes - 1) - 1) {
newDisplacements[tid] = 2 * curDisplacements[tid] -
oldDisplacements[tid]
+ cfl2 * (curDisplacements[tid+1] - 2*curDisplacements[tid] + curDisplacements[tid-1]);
tid += gridDim.x * blockDim.x;
}
// Apply fixed x_{n} boundary condition
if (tid == (numberOfNodes - 1)) {
newDisplacements[tid] = 0;
}
}
void cudaCall1DWaveEqnKernel(const unsigned int blocks,
const unsigned int threadsPerBlock, const unsigned int numberOfNodes,
const float cfl, const float *oldDisplacements,
const float *curDisplacements, float *newDisplacements,
const float left_boundary_value) {
const float cfl2 = cfl * cfl;
hipLaunchKernelGGL(( cuda1DWaveEqnKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, numberOfNodes, cfl2,
oldDisplacements, curDisplacements, newDisplacements,
left_boundary_value);
}
|
da05a02c56c35bbe8c2ea57daab9655077e3f30c.cu
|
/* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
__global__
void
cuda1DWaveEqnKernel(const unsigned int numberOfNodes, const float cfl2,
const float *oldDisplacements, const float *curDisplacements,
float *newDisplacements, const float left_boundary_value) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Warp divergence needed here: apply boundary condition at x_0^{n+1}
if (tid == 0) {
newDisplacements[tid] = left_boundary_value;
tid += gridDim.x * blockDim.x;
}
// Apply the centered-time, centered-space finite difference method
while (tid <= (numberOfNodes - 1) - 1) {
newDisplacements[tid] = 2 * curDisplacements[tid] -
oldDisplacements[tid]
+ cfl2 * (curDisplacements[tid+1] - 2*curDisplacements[tid] + curDisplacements[tid-1]);
tid += gridDim.x * blockDim.x;
}
    // Apply the fixed boundary condition at the last spatial node x_{N-1}
if (tid == (numberOfNodes - 1)) {
newDisplacements[tid] = 0;
}
}
void cudaCall1DWaveEqnKernel(const unsigned int blocks,
const unsigned int threadsPerBlock, const unsigned int numberOfNodes,
const float cfl, const float *oldDisplacements,
const float *curDisplacements, float *newDisplacements,
const float left_boundary_value) {
const float cfl2 = cfl * cfl;
cuda1DWaveEqnKernel<<<blocks, threadsPerBlock>>>(numberOfNodes, cfl2,
oldDisplacements, curDisplacements, newDisplacements,
left_boundary_value);
}
|
1e531f2fe46d77f8beec3ec459b47363ad74b448.hip
|
// !!! This is a file automatically generated by hipify!!!
// ref: https://books.google.com/books?id=ODTaCgAAQBAJ&pg=PT562&lpg=PT562&dq=estimate+pi+with+cuda+thrust&source=bl&ots=IDMmU3Ld7Y&sig=dGojekciyDV6f7OoKwijXUZvSXk&hl=en&sa=X&authuser=0#v=onepage&q=estimate%20pi%20with%20cuda%20thrust&f=false
// https://github.com/ishanthilina/CUDA-Calculation-Experiements/blob/master/q1/pi-hiprand-thrust.cu
#include <iostream>
#include <sstream>
#include <cmath>
//#include <chrono>
//#include <thread>
#include <sys/time.h>
#include <omp.h>
#include <thrust/transform_reduce.h>
#include <thrust/random.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/system/omp/execution_policy.h>
#include <thrust/system/cpp/execution_policy.h>
//#define UNUSED(x) (void)x;
#define UNUSED(x) [&x]{}()
typedef long long unsigned int UINT64;
// Generate pi trial.
//struct Genpit {
//private:
// thrust::default_random_engine rng_;
// thrust::uniform_real_distribution<float> dist_;
//
//public:
// Genpit() {
// thrust::default_random_engine rng(clock());
// thrust::uniform_real_distribution<float> dist(0.0f, 1.0f);
// rng_ = rng;
// dist_ = dist;
// }
//
// __host__ __device__ __forceinline__ float operator()(UINT64 id_or_seed)
// {
// float x, y;
// #ifdef __CUDA_ARCH__ // macro true on device and false on host.
// // thrust::default_random_engine rng(clock());
// // thrust::uniform_real_distribution<float> dist(0.0f, 1.0f);
// rng_.discard(id_or_seed);
// x = dist_(rng_);
// y = dist_(rng_);
// #else
// unsigned int rseed_ = id_or_seed;
// x = (float)rand_r(&rseed_)/RAND_MAX;
// y = (float)rand_r(&rseed_)/RAND_MAX;
// #endif
// return (x * x + y * y) <= 1.0f;
// }
//};
int main(int argc, char* argv[]) {
struct timeval t1, t2;
auto print_msg = [&] (const std::string & device, float pi) {
double tdelta =
(1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec)
/ 1000000.0; // in s
std::cout << "\n" << device << " Est. pi = " << pi
<< "\nTime: " << tdelta << " s"
<< std::endl;
};
//int N = (1 << 31);
UINT64 N = 10000000; // = 0xFFFFFFFFF;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
// N = prop.totalGlobalMem * 0.75;
if (argc > 1) {
N = atof(argv[1]);
}
std::cout << "Trials: " << N << std::endl;
unsigned int rseed = time(NULL);
//unsigned int rseed =
// static_cast<uint64_t>(std::chrono::system_clock::to_time_t(
// std::chrono::system_clock::now()));
// generate pi trial lambda function.
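  // A sketch of the idea behind the trial below (not an original comment): (x, y) is
  // drawn uniformly in the unit square and the trial returns 1 when the point lies
  // inside the quarter circle x^2 + y^2 <= 1. The hit fraction tends to pi/4, so the
  // reductions further down recover pi as 4 * pi_count / N.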
auto genpit = [=] __host__ __device__ (UINT64 id_or_seed) {
float x, y;
#ifdef __CUDA_ARCH__ // macro true on device and false on host.
thrust::default_random_engine rng(clock());
thrust::uniform_real_distribution<float> dist(0.0f, 1.0f);
rng.discard(id_or_seed);
x = dist(rng);
y = dist(rng);
#else
unsigned int rseed_ = id_or_seed;
x = (float)rand_r(&rseed_)/RAND_MAX;
y = (float)rand_r(&rseed_)/RAND_MAX;
#endif
return (x * x + y * y) <= 1.0f;
};
std::cout << "calculate pi \n";
// GPU parallelization ####################################################
gettimeofday(&t1, 0);
float pi_count = thrust::transform_reduce(thrust::hip::par,
thrust::counting_iterator<UINT64>(0),
thrust::counting_iterator<UINT64>(N),
genpit,
0.0f, thrust::plus<float>());
hipDeviceSynchronize();
gettimeofday(&t2, 0);
// std::cout << "pi count: " << pi_count << std::endl;
float pi = pi_count * 4.f / (float) N;
print_msg("GPU", pi);
// CPP no parallelization #################################################
gettimeofday(&t1, 0);
pi_count = thrust::transform_reduce(thrust::cpp::par,
thrust::counting_iterator<UINT64>(rseed),
thrust::counting_iterator<UINT64>(rseed + N),
genpit,
0.0f, thrust::plus<float>());
gettimeofday(&t2, 0);
//std::cout << "CPP pi count: " << pi_count << std::endl;
pi = pi_count * 4.f / (float) N;
print_msg("CPP", pi);
// OMP parallelization ####################################################
gettimeofday(&t1, 0);
pi_count = thrust::transform_reduce(thrust::omp::par,
thrust::counting_iterator<UINT64>(rseed),
thrust::counting_iterator<UINT64>(rseed + N),
genpit,
0.0f, thrust::plus<float>());
gettimeofday(&t2, 0);
pi = pi_count * 4.f / (float) N;
print_msg("THRUST OMP", pi);
// Manual OMP parallelization #############################################
// The thrust::omp::par profile seems slow.
pi_count = 0;
gettimeofday(&t1, 0);
#pragma omp parallel for reduction(+:pi_count)
for (UINT64 i = 0; i < N; i++) {
pi_count += genpit(rseed + i);
}
gettimeofday(&t2, 0);
pi = pi_count * 4.f / (float) N;
print_msg("MANUAL OMP", pi);
// ########################################################################
//unsigned int total_threads = std::thread::hardware_concurrency();
//std::cout << "\nTotal threads " << total_threads << std::endl;
//std::stringstream msg;
//msg.str(std::string());
//msg << "OMP using " << nthreads << " threads.";
//print_msg(msg.str(), pi);
int nthreads = omp_get_max_threads(); // use OMP_NUM_THREADS to vary.
std::cout << "\nOMP using " << nthreads << " threads." << std::endl;
return 0;
}
|
1e531f2fe46d77f8beec3ec459b47363ad74b448.cu
|
// ref: https://books.google.com/books?id=ODTaCgAAQBAJ&pg=PT562&lpg=PT562&dq=estimate+pi+with+cuda+thrust&source=bl&ots=IDMmU3Ld7Y&sig=dGojekciyDV6f7OoKwijXUZvSXk&hl=en&sa=X&authuser=0#v=onepage&q=estimate%20pi%20with%20cuda%20thrust&f=false
// https://github.com/ishanthilina/CUDA-Calculation-Experiements/blob/master/q1/pi-curand-thrust.cu
#include <iostream>
#include <sstream>
#include <cmath>
//#include <chrono>
//#include <thread>
#include <sys/time.h>
#include <omp.h>
#include <thrust/transform_reduce.h>
#include <thrust/random.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/system/omp/execution_policy.h>
#include <thrust/system/cpp/execution_policy.h>
//#define UNUSED(x) (void)x;
#define UNUSED(x) [&x]{}()
typedef long long unsigned int UINT64;
// Generate pi trial.
//struct Genpit {
//private:
// thrust::default_random_engine rng_;
// thrust::uniform_real_distribution<float> dist_;
//
//public:
// Genpit() {
// thrust::default_random_engine rng(clock());
// thrust::uniform_real_distribution<float> dist(0.0f, 1.0f);
// rng_ = rng;
// dist_ = dist;
// }
//
// __host__ __device__ __forceinline__ float operator()(UINT64 id_or_seed)
// {
// float x, y;
// #ifdef __CUDA_ARCH__ // macro true on device and false on host.
// // thrust::default_random_engine rng(clock());
// // thrust::uniform_real_distribution<float> dist(0.0f, 1.0f);
// rng_.discard(id_or_seed);
// x = dist_(rng_);
// y = dist_(rng_);
// #else
// unsigned int rseed_ = id_or_seed;
// x = (float)rand_r(&rseed_)/RAND_MAX;
// y = (float)rand_r(&rseed_)/RAND_MAX;
// #endif
// return (x * x + y * y) <= 1.0f;
// }
//};
int main(int argc, char* argv[]) {
struct timeval t1, t2;
auto print_msg = [&] (const std::string & device, float pi) {
double tdelta =
(1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec)
/ 1000000.0; // in s
std::cout << "\n" << device << " Est. pi = " << pi
<< "\nTime: " << tdelta << " s"
<< std::endl;
};
//int N = (1 << 31);
UINT64 N = 10000000; // = 0xFFFFFFFFF;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
// N = prop.totalGlobalMem * 0.75;
if (argc > 1) {
N = atof(argv[1]);
}
std::cout << "Trials: " << N << std::endl;
unsigned int rseed = time(NULL);
//unsigned int rseed =
// static_cast<uint64_t>(std::chrono::system_clock::to_time_t(
// std::chrono::system_clock::now()));
// generate pi trial lambda function.
auto genpit = [=] __host__ __device__ (UINT64 id_or_seed) {
float x, y;
#ifdef __CUDA_ARCH__ // macro true on device and false on host.
thrust::default_random_engine rng(clock());
thrust::uniform_real_distribution<float> dist(0.0f, 1.0f);
rng.discard(id_or_seed);
x = dist(rng);
y = dist(rng);
#else
unsigned int rseed_ = id_or_seed;
x = (float)rand_r(&rseed_)/RAND_MAX;
y = (float)rand_r(&rseed_)/RAND_MAX;
#endif
return (x * x + y * y) <= 1.0f;
};
std::cout << "calculate pi \n";
// GPU parallelization ####################################################
gettimeofday(&t1, 0);
float pi_count = thrust::transform_reduce(thrust::cuda::par,
thrust::counting_iterator<UINT64>(0),
thrust::counting_iterator<UINT64>(N),
genpit,
0.0f, thrust::plus<float>());
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
// std::cout << "pi count: " << pi_count << std::endl;
float pi = pi_count * 4.f / (float) N;
print_msg("GPU", pi);
// CPP no parallelization #################################################
gettimeofday(&t1, 0);
pi_count = thrust::transform_reduce(thrust::cpp::par,
thrust::counting_iterator<UINT64>(rseed),
thrust::counting_iterator<UINT64>(rseed + N),
genpit,
0.0f, thrust::plus<float>());
gettimeofday(&t2, 0);
//std::cout << "CPP pi count: " << pi_count << std::endl;
pi = pi_count * 4.f / (float) N;
print_msg("CPP", pi);
// OMP parallelization ####################################################
gettimeofday(&t1, 0);
pi_count = thrust::transform_reduce(thrust::omp::par,
thrust::counting_iterator<UINT64>(rseed),
thrust::counting_iterator<UINT64>(rseed + N),
genpit,
0.0f, thrust::plus<float>());
gettimeofday(&t2, 0);
pi = pi_count * 4.f / (float) N;
print_msg("THRUST OMP", pi);
// Manual OMP parallelization #############################################
// The thrust::omp::par profile seems slow.
pi_count = 0;
gettimeofday(&t1, 0);
#pragma omp parallel for reduction(+:pi_count)
for (UINT64 i = 0; i < N; i++) {
pi_count += genpit(rseed + i);
}
gettimeofday(&t2, 0);
pi = pi_count * 4.f / (float) N;
print_msg("MANUAL OMP", pi);
// ########################################################################
//unsigned int total_threads = std::thread::hardware_concurrency();
//std::cout << "\nTotal threads " << total_threads << std::endl;
//std::stringstream msg;
//msg.str(std::string());
//msg << "OMP using " << nthreads << " threads.";
//print_msg(msg.str(), pi);
int nthreads = omp_get_max_threads(); // use OMP_NUM_THREADS to vary.
std::cout << "\nOMP using " << nthreads << " threads." << std::endl;
return 0;
}
|
39d5299889951c7a0b1d7d19a622de0ac0fb5603.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaFlow.h"
/// source image
texture<float, 2, hipReadModeElementType> texInput;
__global__ void ComputeDerivKernel(int width, int height, int stride,
float *Ix, float *Iy)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float t0;
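	// Both derivatives below use the standard 4th-order central-difference stencil
	//   f'(x) ~= (f(x-2h) - 8*f(x-h) + 8*f(x+h) - f(x+2h)) / (12*h),
	// applied here with h = one pixel (one dx/dy step in normalized texture
	// coordinates), so the division by 12 leaves Ix/Iy as per-pixel intensity differences.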
// x derivative
t0 = tex2D(texInput, x - 2.0f * dx, y);
t0 -= tex2D(texInput, x - 1.0f * dx, y) * 8.0f;
t0 += tex2D(texInput, x + 1.0f * dx, y) * 8.0f;
t0 -= tex2D(texInput, x + 2.0f * dx, y);
t0 /= 12.0f;
Ix[pos] = t0;
// y derivative
t0 = tex2D(texInput, x, y - 2.0f * dy);
t0 -= tex2D(texInput, x, y - 1.0f * dy) * 8.0f;
t0 += tex2D(texInput, x, y + 1.0f * dy) * 8.0f;
t0 -= tex2D(texInput, x, y + 2.0f * dy);
t0 /= 12.0f;
Iy[pos] = (t0);
}
__global__ void ComputeDerivMaskKernel(int width, int height, int stride,
float *mask, float threshold)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float t0;
// x derivative
float ixderiv = (tex2D(texInput, x - 2.0f * dx, y) - tex2D(texInput, x - 1.0f * dx, y) * 8.0f
+ tex2D(texInput, x + 1.0f * dx, y) * 8.0f - tex2D(texInput, x + 2.0f * dx, y))/12.0f;
float ixderiv2 = (tex2D(texInput, x - 4.0f * dx, y) - tex2D(texInput, x - 2.0f * dx, y) * 8.0f
+ tex2D(texInput, x + 2.0f * dx, y) * 8.0f - tex2D(texInput, x + 4.0f * dx, y)) / 12.0f;
float ixderiv4 = (tex2D(texInput, x - 8.0f * dx, y) - tex2D(texInput, x - 4.0f * dx, y) * 8.0f
+ tex2D(texInput, x + 4.0f * dx, y) * 8.0f - tex2D(texInput, x + 8.0f * dx, y)) / 12.0f;
//float ixderiv8 = (tex2D(texInput, x - 16.0f * dx, y) - tex2D(texInput, x - 8.0f * dx, y) * 8.0f
// + tex2D(texInput, x + 8.0f * dx, y) * 8.0f - tex2D(texInput, x + 16.0f * dx, y)) / 12.0f;
// y derivative
float iyderiv = (tex2D(texInput, x, y - 2.0f * dy) - tex2D(texInput, x, y - 1.0f * dy) * 8.0f
+ tex2D(texInput, x, y + 1.0f * dy) * 8.0f - tex2D(texInput, x, y + 2.0f * dy))/12.0f;
float iyderiv2 = (tex2D(texInput, x, y - 4.0f * dy) - tex2D(texInput, x, y - 2.0f * dy) * 8.0f
+ tex2D(texInput, x, y + 2.0f * dy) * 8.0f - tex2D(texInput, x, y + 4.0f * dy)) / 12.0f;
float iyderiv4 = (tex2D(texInput, x, y - 8.0f * dy) - tex2D(texInput, x, y - 4.0f * dy) * 8.0f
+ tex2D(texInput, x, y + 4.0f * dy) * 8.0f - tex2D(texInput, x, y + 8.0f * dy)) / 12.0f;
//float iyderiv8 = (tex2D(texInput, x, y - 16.0f * dy) - tex2D(texInput, x, y - 8.0f * dy) * 8.0f
// + tex2D(texInput, x, y + 8.0f * dy) * 8.0f - tex2D(texInput, x, y + 16.0f * dy)) / 12.0f;
if ((ixderiv > threshold) || (iyderiv > threshold) || (ixderiv2 > threshold) || (iyderiv2 > threshold)
|| (ixderiv4 > threshold) || (iyderiv4 > threshold)){// || (ixderiv8 > threshold) || (iyderiv8 > threshold)) {
mask[pos] = 1.0f;
}
else mask[pos] = 0.0f;
}
void sor::CudaFlow::ComputeDeriv(float *I0,
int w, int h, int s,
float *Ix, float *Iy)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
texInput.addressMode[0] = hipAddressModeMirror;
texInput.addressMode[1] = hipAddressModeMirror;
texInput.filterMode = hipFilterModeLinear;
texInput.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, texInput, I0, w, h, s * sizeof(float));
ComputeDerivKernel << < blocks, threads >> >(w, h, s, Ix, Iy);
}
void sor::CudaFlow::ComputeDerivMask(float *I0, int w, int h, int s, float *mask, float threshold) {
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
texInput.addressMode[0] = hipAddressModeMirror;
texInput.addressMode[1] = hipAddressModeMirror;
texInput.filterMode = hipFilterModeLinear;
texInput.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, texInput, I0, w, h, s * sizeof(float));
ComputeDerivMaskKernel << < blocks, threads >> >(w, h, s, mask, threshold);
}
|
39d5299889951c7a0b1d7d19a622de0ac0fb5603.cu
|
#include "CudaFlow.h"
/// source image
texture<float, 2, cudaReadModeElementType> texInput;
__global__ void ComputeDerivKernel(int width, int height, int stride,
float *Ix, float *Iy)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float t0;
// x derivative
t0 = tex2D(texInput, x - 2.0f * dx, y);
t0 -= tex2D(texInput, x - 1.0f * dx, y) * 8.0f;
t0 += tex2D(texInput, x + 1.0f * dx, y) * 8.0f;
t0 -= tex2D(texInput, x + 2.0f * dx, y);
t0 /= 12.0f;
Ix[pos] = t0;
// y derivative
t0 = tex2D(texInput, x, y - 2.0f * dy);
t0 -= tex2D(texInput, x, y - 1.0f * dy) * 8.0f;
t0 += tex2D(texInput, x, y + 1.0f * dy) * 8.0f;
t0 -= tex2D(texInput, x, y + 2.0f * dy);
t0 /= 12.0f;
Iy[pos] = (t0);
}
__global__ void ComputeDerivMaskKernel(int width, int height, int stride,
float *mask, float threshold)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float t0;
// x derivative
float ixderiv = (tex2D(texInput, x - 2.0f * dx, y) - tex2D(texInput, x - 1.0f * dx, y) * 8.0f
+ tex2D(texInput, x + 1.0f * dx, y) * 8.0f - tex2D(texInput, x + 2.0f * dx, y))/12.0f;
float ixderiv2 = (tex2D(texInput, x - 4.0f * dx, y) - tex2D(texInput, x - 2.0f * dx, y) * 8.0f
+ tex2D(texInput, x + 2.0f * dx, y) * 8.0f - tex2D(texInput, x + 4.0f * dx, y)) / 12.0f;
float ixderiv4 = (tex2D(texInput, x - 8.0f * dx, y) - tex2D(texInput, x - 4.0f * dx, y) * 8.0f
+ tex2D(texInput, x + 4.0f * dx, y) * 8.0f - tex2D(texInput, x + 8.0f * dx, y)) / 12.0f;
//float ixderiv8 = (tex2D(texInput, x - 16.0f * dx, y) - tex2D(texInput, x - 8.0f * dx, y) * 8.0f
// + tex2D(texInput, x + 8.0f * dx, y) * 8.0f - tex2D(texInput, x + 16.0f * dx, y)) / 12.0f;
// y derivative
float iyderiv = (tex2D(texInput, x, y - 2.0f * dy) - tex2D(texInput, x, y - 1.0f * dy) * 8.0f
+ tex2D(texInput, x, y + 1.0f * dy) * 8.0f - tex2D(texInput, x, y + 2.0f * dy))/12.0f;
float iyderiv2 = (tex2D(texInput, x, y - 4.0f * dy) - tex2D(texInput, x, y - 2.0f * dy) * 8.0f
+ tex2D(texInput, x, y + 2.0f * dy) * 8.0f - tex2D(texInput, x, y + 4.0f * dy)) / 12.0f;
float iyderiv4 = (tex2D(texInput, x, y - 8.0f * dy) - tex2D(texInput, x, y - 4.0f * dy) * 8.0f
+ tex2D(texInput, x, y + 4.0f * dy) * 8.0f - tex2D(texInput, x, y + 8.0f * dy)) / 12.0f;
//float iyderiv8 = (tex2D(texInput, x, y - 16.0f * dy) - tex2D(texInput, x, y - 8.0f * dy) * 8.0f
// + tex2D(texInput, x, y + 8.0f * dy) * 8.0f - tex2D(texInput, x, y + 16.0f * dy)) / 12.0f;
if ((ixderiv > threshold) || (iyderiv > threshold) || (ixderiv2 > threshold) || (iyderiv2 > threshold)
|| (ixderiv4 > threshold) || (iyderiv4 > threshold)){// || (ixderiv8 > threshold) || (iyderiv8 > threshold)) {
mask[pos] = 1.0f;
}
else mask[pos] = 0.0f;
}
void sor::CudaFlow::ComputeDeriv(float *I0,
int w, int h, int s,
float *Ix, float *Iy)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
texInput.addressMode[0] = cudaAddressModeMirror;
texInput.addressMode[1] = cudaAddressModeMirror;
texInput.filterMode = cudaFilterModeLinear;
texInput.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, texInput, I0, w, h, s * sizeof(float));
ComputeDerivKernel << < blocks, threads >> >(w, h, s, Ix, Iy);
}
void sor::CudaFlow::ComputeDerivMask(float *I0, int w, int h, int s, float *mask, float threshold) {
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
texInput.addressMode[0] = cudaAddressModeMirror;
texInput.addressMode[1] = cudaAddressModeMirror;
texInput.filterMode = cudaFilterModeLinear;
texInput.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, texInput, I0, w, h, s * sizeof(float));
ComputeDerivMaskKernel << < blocks, threads >> >(w, h, s, mask, threshold);
}
|
a63a7a939e42462aceb56834a4c599efdcaec318.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cudaCompress/Huffman.h>
#include <cassert>
#include <hip/hip_runtime.h>
//#include <thrust/device_ptr.h>
//#include <thrust/scan.h>
#include <cudaCompress/cudaUtil.h>
#include <cudaCompress/util.h>
#include <cudaCompress/InstanceImpl.h>
#include <cudaCompress/scan/scan_app.cui>
#include "HuffmanKernels.cui"
namespace cudaCompress {
size_t huffmanGetRequiredMemory(const Instance* pInstance)
{
uint streamCountMax = pInstance->m_streamCountMax;
uint symbolCountPerStreamMax = pInstance->m_elemCountPerStreamMax;
size_t sizeEncode = 0;
size_t sizeDecode = 0;
// encode: dpStreamInfos
sizeEncode += getAlignedSize(sizeof(HuffmanGPUStreamInfo) * streamCountMax, 128);
// encode: dpScratch
uint prefixCountMax = getPrefixCount(symbolCountPerStreamMax);
uint scratchBytes = (uint)getAlignedSize((prefixCountMax + 1) * sizeof(uint), 128);
sizeEncode += streamCountMax * getAlignedSize(scratchBytes, 128);
// encode: dppScratch
sizeEncode += getAlignedSize(streamCountMax * sizeof(uint*), 128);
// encode: dpScanTotal
sizeEncode += getAlignedSize(streamCountMax * sizeof(uint), 128);
// decode: dpStreamInfos
sizeDecode += getAlignedSize(sizeof(HuffmanGPUStreamInfo) * streamCountMax, 128);
return std::max<size_t>(sizeEncode, sizeDecode);
}
bool huffmanInit(Instance* pInstance)
{
uint streamCountMax = pInstance->m_streamCountMax;
cudaSafeCall(hipHostMalloc(&pInstance->Huffman.pReadback, streamCountMax * sizeof(uint)));
cudaSafeCall(hipEventCreateWithFlags(&pInstance->Huffman.syncEventReadback, hipEventDisableTiming));
return true;
}
bool huffmanShutdown(Instance* pInstance)
{
cudaSafeCall(hipEventDestroy(pInstance->Huffman.syncEventReadback));
pInstance->Huffman.syncEventReadback = 0;
cudaSafeCall(hipHostFree(pInstance->Huffman.pReadback));
pInstance->Huffman.pReadback = NULL;
return true;
}
bool huffmanEncode(Instance* pInstance, const HuffmanGPUStreamInfo* pStreamInfos, uint streamCount, uint codingBlockSize, uint* pCompressedSizeBits)
{
assert(streamCount <= pInstance->m_streamCountMax);
bool longSymbols = (pInstance->m_log2HuffmanDistinctSymbolCountMax > 16);
uint prefixCountMax = 0;
uint offsetCountMax = 0;
for(uint i = 0; i < streamCount; i++) {
const HuffmanGPUStreamInfo& streamInfo = pStreamInfos[i];
assert(streamInfo.symbolCount <= pInstance->m_elemCountPerStreamMax);
uint prefixCount = getPrefixCount(streamInfo.symbolCount);
prefixCountMax = max(prefixCountMax, prefixCount);
uint offsetCount = (streamInfo.symbolCount + codingBlockSize - 1) / codingBlockSize;
offsetCountMax = max(offsetCountMax, offsetCount);
}
HuffmanGPUStreamInfo* dpStreamInfos = pInstance->getBuffer<HuffmanGPUStreamInfo>(streamCount);
uint scratchElems = (uint)getAlignedSize(prefixCountMax + 1, 128 / sizeof(uint));
uint* dpScratch = pInstance->getBuffer<uint>(streamCount * scratchElems);
uint** dppScratch = pInstance->getBuffer<uint*>(streamCount);
uint* dpScanTotal = pInstance->getBuffer<uint>(streamCount);
std::vector<uint*> pdpScratch(streamCount);
for(uint i = 0; i < streamCount; i++) {
pdpScratch[i] = dpScratch + i * scratchElems;
}
util::CudaScopedTimer timer(pInstance->Huffman.timerEncode);
timer("Upload Info");
cudaSafeCall(hipMemcpyAsync(dpStreamInfos, pStreamInfos, sizeof(HuffmanGPUStreamInfo) * streamCount, hipMemcpyHostToDevice, pInstance->m_stream));
// note: we don't sync on this upload - we trust that the caller won't overwrite/delete the array...
cudaSafeCall(hipMemcpyAsync(dppScratch, pdpScratch.data(), sizeof(uint*) * streamCount, hipMemcpyHostToDevice, pInstance->m_stream)); //TODO upload buffer?
// there's a sync in here later on, so this "should" be okay...
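    // A reading of the encode stages below, following the timer labels:
    //  1. Words to Lengths: compute codeword bit lengths for groups of consecutive symbols,
    //  2. Scan Lengths: exclusive-scan those lengths into output bit offsets; the per-stream
    //     scan total is its compressed size in bits and is read back asynchronously,
    //  3. Collect Offsets: record the bit offset of every coding block of codingBlockSize symbols,
    //  4. Compactify: write the codewords into the packed output bitstream.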
timer("Words to Lengths");
// get codeword lengths (of COMPACTIFY_ELEM_PER_THREAD consecutive codewords)
if(prefixCountMax > 0) {
uint blockSize = WORDS_TO_LENGTH_THREADS_PER_BLOCK;
dim3 blockCount((prefixCountMax + blockSize - 1) / blockSize, streamCount);
if(longSymbols) {
hipLaunchKernelGGL(( huffmanEncodeWordsToLengthKernel<Symbol32>), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dppScratch, dpStreamInfos);
} else {
hipLaunchKernelGGL(( huffmanEncodeWordsToLengthKernel<Symbol16>), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dppScratch, dpStreamInfos);
}
cudaCheckMsg("huffmanEncodeWordsToLengthKernel execution failed");
}
timer("Scan Lengths");
if(prefixCountMax > 0) {
// scan codeword lengths to get output indices
scanArray<uint, uint, true>(dpScratch, dpScratch, prefixCountMax + 1, streamCount, scratchElems, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("huffmanEncode: Error in scanArray");
// copy scan totals (= compressed bit sizes) into contiguous buffer for common download
uint blockSize = min(128u, streamCount);
uint blockCount = (streamCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( huffmanEncodeCopyScanTotalsKernel), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpStreamInfos, streamCount, (const uint**)dppScratch, dpScanTotal);
cudaCheckMsg("huffmanEncodeCopyScanTotalsKernel execution failed");
// start readback of compressed size
cudaSafeCall(hipMemcpyAsync(pInstance->Huffman.pReadback, dpScanTotal, streamCount * sizeof(uint), hipMemcpyDeviceToHost, pInstance->m_stream));
cudaSafeCall(hipEventRecord(pInstance->Huffman.syncEventReadback, pInstance->m_stream));
}
timer("Collect Offsets");
if(offsetCountMax > 0) {
uint blockSize = min(128u, offsetCountMax);
dim3 blockCount((offsetCountMax + blockSize - 1) / blockSize, streamCount);
hipLaunchKernelGGL(( huffmanEncodeCollectOffsetsKernel), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpStreamInfos, (const uint**)dppScratch, codingBlockSize);
cudaCheckMsg("huffmanEncodeCollectOffsetsKernel execution failed");
}
timer("Compactify");
if(prefixCountMax > 0) {
uint blockSize = COMPACTIFY_THREADS_PER_BLOCK;
dim3 blockCount((prefixCountMax + blockSize - 1) / blockSize, streamCount);
if(longSymbols) {
hipLaunchKernelGGL(( huffmanEncodeCompactifyKernel<Symbol32>), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpStreamInfos, (const uint**)dppScratch);
} else {
hipLaunchKernelGGL(( huffmanEncodeCompactifyKernel<Symbol16>), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpStreamInfos, (const uint**)dppScratch);
}
cudaCheckMsg("huffmanEncodeCompactifyKernel execution failed");
}
timer("Readback Sync");
if(prefixCountMax > 0) {
cudaSafeCall(hipEventSynchronize(pInstance->Huffman.syncEventReadback));
}
for(uint i = 0; i < streamCount; i++) {
const HuffmanGPUStreamInfo& streamInfo = pStreamInfos[i];
if(streamInfo.symbolCount == 0) {
pCompressedSizeBits[i] = 0;
} else {
pCompressedSizeBits[i] = pInstance->Huffman.pReadback[i];
}
}
timer();
pInstance->releaseBuffers(4);
return true;
}
bool huffmanDecode(Instance* pInstance, const HuffmanGPUStreamInfo* pStreamInfos, uint streamCount, uint codingBlockSize)
{
assert(streamCount <= pInstance->m_streamCountMax);
bool longSymbols = (pInstance->m_log2HuffmanDistinctSymbolCountMax > 16);
HuffmanGPUStreamInfo* dpStreamInfos = pInstance->getBuffer<HuffmanGPUStreamInfo>(streamCount);
util::CudaScopedTimer timer(pInstance->Huffman.timerDecode);
timer("Upload Info");
// upload stream infos
cudaSafeCall(hipMemcpyAsync(dpStreamInfos, pStreamInfos, sizeof(HuffmanGPUStreamInfo) * streamCount, hipMemcpyHostToDevice, pInstance->m_stream));
// note: we don't sync on this upload - we trust that the caller won't overwrite/delete the array...
timer("Decode");
// get max number of symbols
uint symbolCountPerStreamMax = 0;
for(uint i = 0; i < streamCount; i++)
symbolCountPerStreamMax = max(symbolCountPerStreamMax, pStreamInfos[i].symbolCount);
if(symbolCountPerStreamMax == 0) {
pInstance->releaseBuffer();
return true;
}
// launch decode kernel
uint threadCountPerStream = (symbolCountPerStreamMax + codingBlockSize - 1) / codingBlockSize;
uint blockSize = min(192u, threadCountPerStream);
blockSize = max(blockSize, HUFFMAN_LOOKUP_SIZE);
assert(blockSize >= HUFFMAN_LOOKUP_SIZE);
dim3 blockCount((threadCountPerStream + blockSize - 1) / blockSize, streamCount);
if(longSymbols) {
hipLaunchKernelGGL(( huffmanDecodeKernel<Symbol32>), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpStreamInfos, codingBlockSize);
} else {
hipLaunchKernelGGL(( huffmanDecodeKernel<Symbol16>), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpStreamInfos, codingBlockSize);
}
cudaCheckMsg("huffmanDecodeKernel execution failed");
timer("Transpose");
// launch transpose kernel
dim3 blockSizeTranspose(TRANSPOSE_BLOCKDIM_X, TRANSPOSE_BLOCKDIM_Y);
dim3 blockCountTranspose((symbolCountPerStreamMax + WARP_SIZE * codingBlockSize - 1) / (WARP_SIZE * codingBlockSize), streamCount);
if(longSymbols) {
switch(codingBlockSize) {
case 32:
hipLaunchKernelGGL(( huffmanDecodeTransposeKernel<Symbol32, 32>), dim3(blockCountTranspose), dim3(blockSizeTranspose), 0, pInstance->m_stream, dpStreamInfos);
break;
case 64:
hipLaunchKernelGGL(( huffmanDecodeTransposeKernel<Symbol32, 64>), dim3(blockCountTranspose), dim3(blockSizeTranspose), 0, pInstance->m_stream, dpStreamInfos);
break;
case 128:
hipLaunchKernelGGL(( huffmanDecodeTransposeKernel<Symbol32, 128>), dim3(blockCountTranspose), dim3(blockSizeTranspose), 0, pInstance->m_stream, dpStreamInfos);
break;
case 256:
hipLaunchKernelGGL(( huffmanDecodeTransposeKernel<Symbol32, 256>), dim3(blockCountTranspose), dim3(blockSizeTranspose), 0, pInstance->m_stream, dpStreamInfos);
break;
default:
assert(false);
}
} else {
switch(codingBlockSize) {
case 32:
hipLaunchKernelGGL(( huffmanDecodeTransposeKernel<Symbol16, 32>), dim3(blockCountTranspose), dim3(blockSizeTranspose), 0, pInstance->m_stream, dpStreamInfos);
break;
case 64:
hipLaunchKernelGGL(( huffmanDecodeTransposeKernel<Symbol16, 64>), dim3(blockCountTranspose), dim3(blockSizeTranspose), 0, pInstance->m_stream, dpStreamInfos);
break;
case 128:
hipLaunchKernelGGL(( huffmanDecodeTransposeKernel<Symbol16, 128>), dim3(blockCountTranspose), dim3(blockSizeTranspose), 0, pInstance->m_stream, dpStreamInfos);
break;
case 256:
hipLaunchKernelGGL(( huffmanDecodeTransposeKernel<Symbol16, 256>), dim3(blockCountTranspose), dim3(blockSizeTranspose), 0, pInstance->m_stream, dpStreamInfos);
break;
default:
assert(false);
}
}
cudaCheckMsg("huffmanDecodeTransposeKernel execution failed");
timer();
pInstance->releaseBuffer();
return true;
}
}
|
a63a7a939e42462aceb56834a4c599efdcaec318.cu
|
#include <cudaCompress/Huffman.h>
#include <cassert>
#include <cuda_runtime.h>
//#include <thrust/device_ptr.h>
//#include <thrust/scan.h>
#include <cudaCompress/cudaUtil.h>
#include <cudaCompress/util.h>
#include <cudaCompress/InstanceImpl.h>
#include <cudaCompress/scan/scan_app.cui>
#include "HuffmanKernels.cui"
namespace cudaCompress {
size_t huffmanGetRequiredMemory(const Instance* pInstance)
{
uint streamCountMax = pInstance->m_streamCountMax;
uint symbolCountPerStreamMax = pInstance->m_elemCountPerStreamMax;
size_t sizeEncode = 0;
size_t sizeDecode = 0;
// encode: dpStreamInfos
sizeEncode += getAlignedSize(sizeof(HuffmanGPUStreamInfo) * streamCountMax, 128);
// encode: dpScratch
uint prefixCountMax = getPrefixCount(symbolCountPerStreamMax);
uint scratchBytes = (uint)getAlignedSize((prefixCountMax + 1) * sizeof(uint), 128);
sizeEncode += streamCountMax * getAlignedSize(scratchBytes, 128);
// encode: dppScratch
sizeEncode += getAlignedSize(streamCountMax * sizeof(uint*), 128);
// encode: dpScanTotal
sizeEncode += getAlignedSize(streamCountMax * sizeof(uint), 128);
// decode: dpStreamInfos
sizeDecode += getAlignedSize(sizeof(HuffmanGPUStreamInfo) * streamCountMax, 128);
return std::max<size_t>(sizeEncode, sizeDecode);
}
bool huffmanInit(Instance* pInstance)
{
uint streamCountMax = pInstance->m_streamCountMax;
cudaSafeCall(cudaMallocHost(&pInstance->Huffman.pReadback, streamCountMax * sizeof(uint)));
cudaSafeCall(cudaEventCreateWithFlags(&pInstance->Huffman.syncEventReadback, cudaEventDisableTiming));
return true;
}
bool huffmanShutdown(Instance* pInstance)
{
cudaSafeCall(cudaEventDestroy(pInstance->Huffman.syncEventReadback));
pInstance->Huffman.syncEventReadback = 0;
cudaSafeCall(cudaFreeHost(pInstance->Huffman.pReadback));
pInstance->Huffman.pReadback = NULL;
return true;
}
bool huffmanEncode(Instance* pInstance, const HuffmanGPUStreamInfo* pStreamInfos, uint streamCount, uint codingBlockSize, uint* pCompressedSizeBits)
{
assert(streamCount <= pInstance->m_streamCountMax);
bool longSymbols = (pInstance->m_log2HuffmanDistinctSymbolCountMax > 16);
uint prefixCountMax = 0;
uint offsetCountMax = 0;
for(uint i = 0; i < streamCount; i++) {
const HuffmanGPUStreamInfo& streamInfo = pStreamInfos[i];
assert(streamInfo.symbolCount <= pInstance->m_elemCountPerStreamMax);
uint prefixCount = getPrefixCount(streamInfo.symbolCount);
prefixCountMax = max(prefixCountMax, prefixCount);
uint offsetCount = (streamInfo.symbolCount + codingBlockSize - 1) / codingBlockSize;
offsetCountMax = max(offsetCountMax, offsetCount);
}
HuffmanGPUStreamInfo* dpStreamInfos = pInstance->getBuffer<HuffmanGPUStreamInfo>(streamCount);
uint scratchElems = (uint)getAlignedSize(prefixCountMax + 1, 128 / sizeof(uint));
uint* dpScratch = pInstance->getBuffer<uint>(streamCount * scratchElems);
uint** dppScratch = pInstance->getBuffer<uint*>(streamCount);
uint* dpScanTotal = pInstance->getBuffer<uint>(streamCount);
std::vector<uint*> pdpScratch(streamCount);
for(uint i = 0; i < streamCount; i++) {
pdpScratch[i] = dpScratch + i * scratchElems;
}
util::CudaScopedTimer timer(pInstance->Huffman.timerEncode);
timer("Upload Info");
cudaSafeCall(cudaMemcpyAsync(dpStreamInfos, pStreamInfos, sizeof(HuffmanGPUStreamInfo) * streamCount, cudaMemcpyHostToDevice, pInstance->m_stream));
// note: we don't sync on this upload - we trust that the caller won't overwrite/delete the array...
cudaSafeCall(cudaMemcpyAsync(dppScratch, pdpScratch.data(), sizeof(uint*) * streamCount, cudaMemcpyHostToDevice, pInstance->m_stream)); //TODO upload buffer?
// there's a sync in here later on, so this "should" be okay...
timer("Words to Lengths");
// get codeword lengths (of COMPACTIFY_ELEM_PER_THREAD consecutive codewords)
if(prefixCountMax > 0) {
uint blockSize = WORDS_TO_LENGTH_THREADS_PER_BLOCK;
dim3 blockCount((prefixCountMax + blockSize - 1) / blockSize, streamCount);
if(longSymbols) {
huffmanEncodeWordsToLengthKernel<Symbol32><<<blockCount, blockSize, 0, pInstance->m_stream>>>(dppScratch, dpStreamInfos);
} else {
huffmanEncodeWordsToLengthKernel<Symbol16><<<blockCount, blockSize, 0, pInstance->m_stream>>>(dppScratch, dpStreamInfos);
}
cudaCheckMsg("huffmanEncodeWordsToLengthKernel execution failed");
}
timer("Scan Lengths");
if(prefixCountMax > 0) {
// scan codeword lengths to get output indices
scanArray<uint, uint, true>(dpScratch, dpScratch, prefixCountMax + 1, streamCount, scratchElems, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("huffmanEncode: Error in scanArray");
// copy scan totals (= compressed bit sizes) into contiguous buffer for common download
uint blockSize = min(128u, streamCount);
uint blockCount = (streamCount + blockSize - 1) / blockSize;
huffmanEncodeCopyScanTotalsKernel<<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpStreamInfos, streamCount, (const uint**)dppScratch, dpScanTotal);
cudaCheckMsg("huffmanEncodeCopyScanTotalsKernel execution failed");
// start readback of compressed size
cudaSafeCall(cudaMemcpyAsync(pInstance->Huffman.pReadback, dpScanTotal, streamCount * sizeof(uint), cudaMemcpyDeviceToHost, pInstance->m_stream));
cudaSafeCall(cudaEventRecord(pInstance->Huffman.syncEventReadback, pInstance->m_stream));
}
timer("Collect Offsets");
if(offsetCountMax > 0) {
uint blockSize = min(128u, offsetCountMax);
dim3 blockCount((offsetCountMax + blockSize - 1) / blockSize, streamCount);
huffmanEncodeCollectOffsetsKernel<<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpStreamInfos, (const uint**)dppScratch, codingBlockSize);
cudaCheckMsg("huffmanEncodeCollectOffsetsKernel execution failed");
}
timer("Compactify");
if(prefixCountMax > 0) {
uint blockSize = COMPACTIFY_THREADS_PER_BLOCK;
dim3 blockCount((prefixCountMax + blockSize - 1) / blockSize, streamCount);
if(longSymbols) {
huffmanEncodeCompactifyKernel<Symbol32><<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpStreamInfos, (const uint**)dppScratch);
} else {
huffmanEncodeCompactifyKernel<Symbol16><<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpStreamInfos, (const uint**)dppScratch);
}
cudaCheckMsg("huffmanEncodeCompactifyKernel execution failed");
}
timer("Readback Sync");
if(prefixCountMax > 0) {
cudaSafeCall(cudaEventSynchronize(pInstance->Huffman.syncEventReadback));
}
for(uint i = 0; i < streamCount; i++) {
const HuffmanGPUStreamInfo& streamInfo = pStreamInfos[i];
if(streamInfo.symbolCount == 0) {
pCompressedSizeBits[i] = 0;
} else {
pCompressedSizeBits[i] = pInstance->Huffman.pReadback[i];
}
}
timer();
pInstance->releaseBuffers(4);
return true;
}
bool huffmanDecode(Instance* pInstance, const HuffmanGPUStreamInfo* pStreamInfos, uint streamCount, uint codingBlockSize)
{
assert(streamCount <= pInstance->m_streamCountMax);
bool longSymbols = (pInstance->m_log2HuffmanDistinctSymbolCountMax > 16);
HuffmanGPUStreamInfo* dpStreamInfos = pInstance->getBuffer<HuffmanGPUStreamInfo>(streamCount);
util::CudaScopedTimer timer(pInstance->Huffman.timerDecode);
timer("Upload Info");
// upload stream infos
cudaSafeCall(cudaMemcpyAsync(dpStreamInfos, pStreamInfos, sizeof(HuffmanGPUStreamInfo) * streamCount, cudaMemcpyHostToDevice, pInstance->m_stream));
// note: we don't sync on this upload - we trust that the caller won't overwrite/delete the array...
timer("Decode");
// get max number of symbols
uint symbolCountPerStreamMax = 0;
for(uint i = 0; i < streamCount; i++)
symbolCountPerStreamMax = max(symbolCountPerStreamMax, pStreamInfos[i].symbolCount);
if(symbolCountPerStreamMax == 0) {
pInstance->releaseBuffer();
return true;
}
// launch decode kernel
uint threadCountPerStream = (symbolCountPerStreamMax + codingBlockSize - 1) / codingBlockSize;
uint blockSize = min(192u, threadCountPerStream);
blockSize = max(blockSize, HUFFMAN_LOOKUP_SIZE);
assert(blockSize >= HUFFMAN_LOOKUP_SIZE);
dim3 blockCount((threadCountPerStream + blockSize - 1) / blockSize, streamCount);
if(longSymbols) {
huffmanDecodeKernel<Symbol32><<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpStreamInfos, codingBlockSize);
} else {
huffmanDecodeKernel<Symbol16><<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpStreamInfos, codingBlockSize);
}
cudaCheckMsg("huffmanDecodeKernel execution failed");
timer("Transpose");
// launch transpose kernel
dim3 blockSizeTranspose(TRANSPOSE_BLOCKDIM_X, TRANSPOSE_BLOCKDIM_Y);
dim3 blockCountTranspose((symbolCountPerStreamMax + WARP_SIZE * codingBlockSize - 1) / (WARP_SIZE * codingBlockSize), streamCount);
if(longSymbols) {
switch(codingBlockSize) {
case 32:
huffmanDecodeTransposeKernel<Symbol32, 32><<<blockCountTranspose, blockSizeTranspose, 0, pInstance->m_stream>>>(dpStreamInfos);
break;
case 64:
huffmanDecodeTransposeKernel<Symbol32, 64><<<blockCountTranspose, blockSizeTranspose, 0, pInstance->m_stream>>>(dpStreamInfos);
break;
case 128:
huffmanDecodeTransposeKernel<Symbol32, 128><<<blockCountTranspose, blockSizeTranspose, 0, pInstance->m_stream>>>(dpStreamInfos);
break;
case 256:
huffmanDecodeTransposeKernel<Symbol32, 256><<<blockCountTranspose, blockSizeTranspose, 0, pInstance->m_stream>>>(dpStreamInfos);
break;
default:
assert(false);
}
} else {
switch(codingBlockSize) {
case 32:
huffmanDecodeTransposeKernel<Symbol16, 32><<<blockCountTranspose, blockSizeTranspose, 0, pInstance->m_stream>>>(dpStreamInfos);
break;
case 64:
huffmanDecodeTransposeKernel<Symbol16, 64><<<blockCountTranspose, blockSizeTranspose, 0, pInstance->m_stream>>>(dpStreamInfos);
break;
case 128:
huffmanDecodeTransposeKernel<Symbol16, 128><<<blockCountTranspose, blockSizeTranspose, 0, pInstance->m_stream>>>(dpStreamInfos);
break;
case 256:
huffmanDecodeTransposeKernel<Symbol16, 256><<<blockCountTranspose, blockSizeTranspose, 0, pInstance->m_stream>>>(dpStreamInfos);
break;
default:
assert(false);
}
}
cudaCheckMsg("huffmanDecodeTransposeKernel execution failed");
timer();
pInstance->releaseBuffer();
return true;
}
}
|
357f285635dc64f3a136bbd23f5978e327ac68ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "reverse_packed_segs_op.h"
namespace caffe2 {
namespace {
template <typename T, typename LengthType>
__global__
void ReversePackedSegments_kernel(
size_t max_length,
size_t batch_size,
size_t block_size,
const LengthType* lengths_ptr,
const T* data_ptr,
T* rev_data_ptr) {
const int block_id = blockIdx.x;
// index into [0, batch_size)
const int batch = block_id / max_length;
// index into [0, segment)
const int segment = block_id % max_length;
if (batch >= batch_size || segment >= max_length) return;
const int seg_length = lengths_ptr[batch];
// unique data pointer for this CTA
const T* local_data_ptr = data_ptr + (segment * batch_size + batch) * block_size;
// unique pointer for result
T* local_rev_data_ptr;
if (segment < seg_length) {
local_rev_data_ptr = rev_data_ptr + ((seg_length - 1 - segment) * batch_size + batch) * block_size;
} else {
local_rev_data_ptr = rev_data_ptr + (segment * batch_size + batch) * block_size;
}
// copy using 1 element / thread for now
for (int idx = threadIdx.x; idx < block_size; idx+=blockDim.x) {
local_rev_data_ptr[idx] = local_data_ptr[idx];
}
}
} // namespace
// specialization of DoRunWithLengthType
template <>
template <typename T, typename LengthType>
void ReversePackedSegsOp<CUDAContext>::DoRunWithLengthType() {
const auto& data = Input(DATA);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE(
data.ndim() == 3,
"DATA should be 3-D tensor <lengths, "
"segments, embeddings>");
CAFFE_ENFORCE(lengths.ndim() == 1, "LENGTH should be 1-D");
auto* output = Output(0);
const auto& shape = data.dims();
output->Resize(shape);
const auto& max_length = data.dims()[0];
const auto& batch_size = data.dims()[1];
const auto& block_size = data.dims()[2];
CAFFE_ENFORCE(
      lengths.dims()[0] == batch_size,
      "lengths size should be"
      " equal to batch size");
const T* data_ptr = data.template data<T>();
const LengthType* lengths_ptr = lengths.template data<LengthType>();
// reversed data
T* rev_data_ptr = output->template mutable_data<T>();
const int grid = max_length * batch_size;
hipLaunchKernelGGL(( ReversePackedSegments_kernel<T,LengthType>), dim3(grid), dim3(512), 0, context_.cuda_stream(),
max_length,
batch_size,
block_size,
lengths_ptr,
data_ptr,
rev_data_ptr);
}
REGISTER_CUDA_OPERATOR(ReversePackedSegs, ReversePackedSegsOp<CUDAContext>);
} // namespace caffe2
|
357f285635dc64f3a136bbd23f5978e327ac68ef.cu
|
#include "caffe2/core/context_gpu.h"
#include "reverse_packed_segs_op.h"
namespace caffe2 {
namespace {
template <typename T, typename LengthType>
__global__
void ReversePackedSegments_kernel(
size_t max_length,
size_t batch_size,
size_t block_size,
const LengthType* lengths_ptr,
const T* data_ptr,
T* rev_data_ptr) {
const int block_id = blockIdx.x;
// index into [0, batch_size)
const int batch = block_id / max_length;
// index into [0, segment)
const int segment = block_id % max_length;
if (batch >= batch_size || segment >= max_length) return;
const int seg_length = lengths_ptr[batch];
// unique data pointer for this CTA
const T* local_data_ptr = data_ptr + (segment * batch_size + batch) * block_size;
// unique pointer for result
T* local_rev_data_ptr;
if (segment < seg_length) {
local_rev_data_ptr = rev_data_ptr + ((seg_length - 1 - segment) * batch_size + batch) * block_size;
} else {
local_rev_data_ptr = rev_data_ptr + (segment * batch_size + batch) * block_size;
}
// copy using 1 element / thread for now
for (int idx = threadIdx.x; idx < block_size; idx+=blockDim.x) {
local_rev_data_ptr[idx] = local_data_ptr[idx];
}
}
} // namespace
// specialization of DoRunWithLengthType
template <>
template <typename T, typename LengthType>
void ReversePackedSegsOp<CUDAContext>::DoRunWithLengthType() {
const auto& data = Input(DATA);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE(
data.ndim() == 3,
"DATA should be 3-D tensor <lengths, "
"segments, embeddings>");
CAFFE_ENFORCE(lengths.ndim() == 1, "LENGTH should be 1-D");
auto* output = Output(0);
const auto& shape = data.dims();
output->Resize(shape);
const auto& max_length = data.dims()[0];
const auto& batch_size = data.dims()[1];
const auto& block_size = data.dims()[2];
CAFFE_ENFORCE(
      lengths.dims()[0] == batch_size,
      "lengths size should be"
      " equal to batch size");
const T* data_ptr = data.template data<T>();
const LengthType* lengths_ptr = lengths.template data<LengthType>();
// reversed data
T* rev_data_ptr = output->template mutable_data<T>();
const int grid = max_length * batch_size;
ReversePackedSegments_kernel<T,LengthType><<<grid, 512, 0, context_.cuda_stream()>>>(
max_length,
batch_size,
block_size,
lengths_ptr,
data_ptr,
rev_data_ptr);
}
REGISTER_CUDA_OPERATOR(ReversePackedSegs, ReversePackedSegsOp<CUDAContext>);
} // namespace caffe2
|
7ab7535d0cc8999787ee56cde97820c5eb1c536e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.hpp"
__global__
void axpy(int n, double alpha, const double* x, double* y) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i<n) {
y[i] = y[i] + alpha*x[i];
}
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 16);
size_t n = 1 << pow;
auto size_in_bytes = n * sizeof(double);
hipInit(0);
std::cout << "memcopy and daxpy test of size " << n << "\n";
double* x_device = malloc_device<double>(n);
double* y_device = malloc_device<double>(n);
double* x_host = malloc_host<double>(n, 1.5);
double* y_host = malloc_host<double>(n, 3.0);
double* y = malloc_host<double>(n, 0.0);
// copy to device
auto start = get_time();
copy_to_device<double>(x_host, x_device, n);
copy_to_device<double>(y_host, y_device, n);
auto time_H2D = get_time() - start;
// calculate grid dimensions
int num_threads = 128;
int num_blocks = (n-1)/num_threads + 1;
// synchronize the host and device so that the timings are accurate
hipDeviceSynchronize();
start = get_time();
hipLaunchKernelGGL(( axpy), dim3(num_blocks), dim3(num_threads), 0, 0, n, 2, x_device, y_device);
hipDeviceSynchronize();
auto time_axpy = get_time() - start;
// check for error in last kernel call
cuda_check_last_kernel("axpy kernel");
// copy result back to host
start = get_time();
copy_to_host<double>(y_device, y, n);
auto time_D2H = get_time() - start;
std::cout << "-------\ntimings\n-------\n";
std::cout << "H2D: " << time_H2D << " s\n";
std::cout << "D2H: " << time_D2H << " s\n";
std::cout << "axpy: " << time_axpy << " s\n";
std::cout << std::endl;
std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s\n";
std::cout << std::endl;
std::cout << "-------\nbandwidth\n-------\n";
auto H2D_BW = size_in_bytes/1e6*2 / time_H2D;
auto D2H_BW = size_in_bytes/1e6 / time_D2H;
std::cout << "H2D BW: " << H2D_BW << " MB/s\n";
std::cout << "D2H BW: " << D2H_BW << " MB/s\n";
// check for errors
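    // Expected result, derived from the initialization above: y[i] = 3.0 + alpha*1.5
    // with alpha = 2, i.e. exactly 6.0, which is why the check below compares against 6.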
auto errors = 0;
for(auto i=0; i<n; ++i) {
if(::fabs(6.-y[i])>1e-15) {
++errors;
}
}
std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n";
hipFree(x_device);
hipFree(y_device);
free(x_host);
free(y_host);
free(y);
return 0;
}
|
7ab7535d0cc8999787ee56cde97820c5eb1c536e.cu
|
#include <iostream>
#include <cuda.h>
#include "util.hpp"
__global__
void axpy(int n, double alpha, const double* x, double* y) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i<n) {
y[i] = y[i] + alpha*x[i];
}
}
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 16);
size_t n = 1 << pow;
auto size_in_bytes = n * sizeof(double);
cuInit(0);
std::cout << "memcopy and daxpy test of size " << n << "\n";
double* x_device = malloc_device<double>(n);
double* y_device = malloc_device<double>(n);
double* x_host = malloc_host<double>(n, 1.5);
double* y_host = malloc_host<double>(n, 3.0);
double* y = malloc_host<double>(n, 0.0);
// copy to device
auto start = get_time();
copy_to_device<double>(x_host, x_device, n);
copy_to_device<double>(y_host, y_device, n);
auto time_H2D = get_time() - start;
// calculate grid dimensions
int num_threads = 128;
int num_blocks = (n-1)/num_threads + 1;
// synchronize the host and device so that the timings are accurate
cudaDeviceSynchronize();
start = get_time();
axpy<<<num_blocks, num_threads>>>(n, 2, x_device, y_device);
cudaDeviceSynchronize();
auto time_axpy = get_time() - start;
// check for error in last kernel call
cuda_check_last_kernel("axpy kernel");
// copy result back to host
start = get_time();
copy_to_host<double>(y_device, y, n);
auto time_D2H = get_time() - start;
std::cout << "-------\ntimings\n-------\n";
std::cout << "H2D: " << time_H2D << " s\n";
std::cout << "D2H: " << time_D2H << " s\n";
std::cout << "axpy: " << time_axpy << " s\n";
std::cout << std::endl;
std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s\n";
std::cout << std::endl;
std::cout << "-------\nbandwidth\n-------\n";
auto H2D_BW = size_in_bytes/1e6*2 / time_H2D;
auto D2H_BW = size_in_bytes/1e6 / time_D2H;
std::cout << "H2D BW: " << H2D_BW << " MB/s\n";
std::cout << "D2H BW: " << D2H_BW << " MB/s\n";
// check for errors
auto errors = 0;
for(auto i=0; i<n; ++i) {
if(std::fabs(6.-y[i])>1e-15) {
++errors;
}
}
std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n";
cudaFree(x_device);
cudaFree(y_device);
free(x_host);
free(y_host);
free(y);
return 0;
}
|
54e41db25c9db4884532c58611e057d5f797aaf9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <unistd.h>
#include <chrono>
#include <iostream>
#include <vector>
__device__ half float_to_sto_half_direct(float w) {
hiprandState_t state;
hiprand_init((unsigned long long)(w * 100), 0, 0, &state);
half up = __float2half_ru(w);
half down = __float2half_rd(w);
const float up_f32 = __half2float(up);
const float down_f32 = __half2float(down);
// 1 - (w - w_down) / (w_up - w_down) = (w_up - w) / (w_up - w_down) = n / m
const float m = (up_f32 - down_f32);
const float rand = hiprand_uniform(&state);
if (__float_as_uint(m) == 0) {
return up;
}
const float n = (up_f32 - w);
return rand > n / m ? up : down;
}
__device__ float two_to_e(float X) {
const float Y = 16777216 * X; // 2^24
const float U = ((Y + X) - Y) * 0.5;
return U == 0 ? X : U;
}
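// What two_to_e appears to compute (a sketch, assuming positive normalized input):
// adding X to Y = 2^24 * X forces X to be rounded at the ulp of Y, so
// ((Y + X) - Y) * 0.5 comes out as roughly 2^floor(log2(X)), the binary scale of X
// (the U == 0 fallback covers the tie case where X is itself a power of two).
// float_to_sto_half_bitcarry then scales a uniform random value by this and by
// 2^(-10), so the perturbation spans roughly the bits dropped by __float2half_rz.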
__device__ half float_to_sto_half_bitcarry(float w) {
hiprandState_t state;
hiprand_init((unsigned long long)(w * 100), 0, 0, &state);
float rand = hiprand_uniform(&state);
float rand_match_w = two_to_e(w) * rand * 0.0009765625; // 2^(-10)
float Z = w + rand_match_w;
return __float2half_rz(Z);
}
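// Note on the shortrand variant below (an interpretation, not an original comment):
// converting fp32 (23 mantissa bits) to fp16 (10 mantissa bits) with __float2half_rz
// discards the low 13 bits, so adding an 8-bit random value shifted into bits 5..12
// before the truncation carries into the kept bits with probability proportional to
// the discarded fraction, approximating stochastic rounding with 8 bits of randomness.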
__device__ half float_to_sto_half_shortrand(float w, uint8_t rand) {
const unsigned w_int = __float_as_uint(w);
const unsigned w_new = w_int + (rand << 5);
return __float2half_rz(__uint_as_float(w_new));
}
__device__ half float_to_sto_half_assemblefloat(float w, uint8_t rand) {
const unsigned w_int = __float_as_uint(w);
const unsigned assmebles = (w_int & 0xff800000) | (rand << 5);
const unsigned subtract = (w_int & 0xff800000);
const float assmeble_float =
__uint_as_float(assmebles) - __uint_as_float(subtract);
return __float2half_rz(w + assmeble_float);
}
__global__ void convert_float_to_half_direct(half* dst, float* src, int size) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
dst[idx] = float_to_sto_half_direct(src[idx]);
}
}
__global__ void
convert_float_to_half_bitcarry(half* dst, float* src, int size) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
dst[idx] = float_to_sto_half_bitcarry(src[idx]);
}
}
__global__ void
convert_float_to_half_shortrand(half* dst, float* src, uint8_t* r, int size) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
dst[idx] = float_to_sto_half_shortrand(src[idx], r[idx]);
}
}
__global__ void convert_float_to_half_assemblefloat(
half* dst,
float* src,
uint8_t* r,
int size) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
dst[idx] = float_to_sto_half_assemblefloat(src[idx], r[idx]);
}
}
void gen_data(float* d_f32_array, int test_size) {
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MT19937);
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL); // Random seed
hiprandGenerateUniform(gen, d_f32_array, test_size);
hiprandDestroyGenerator(gen);
hipDeviceSynchronize();
}
// generate 64bit random number and then copy back to 8bit memory
void gen_8bit_random(uint8_t* d_random_number, int test_size) {
hiprandGenerator_t gen;
unsigned* d_random_number_f32;
hipMalloc(
&d_random_number_f32,
(test_size / sizeof(unsigned) + 1) * sizeof(unsigned));
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MT19937);
hiprandSetPseudoRandomGeneratorSeed(gen, 5678ULL); // Random seed
hiprandGenerate(gen, d_random_number_f32, (test_size / sizeof(unsigned) + 1));
hipMemcpy(
d_random_number,
d_random_number_f32,
test_size * sizeof(uint8_t),
hipMemcpyDeviceToDevice);
hiprandDestroyGenerator(gen);
hipFree(d_random_number_f32);
}
__global__ void flush_gpu(char* d_flush, char* d_flush2, bool do_write) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const char val = d_flush[idx];
if (do_write * val) {
d_flush2[idx] = val;
}
}
void flush_cache(
std::vector<char> flush,
char* d_flush,
char* d_flush2,
int cache_size,
bool do_write = false) {
hipMemcpy(d_flush, flush.data(), cache_size, hipMemcpyHostToDevice);
const unsigned num_blocks = cache_size / 512;
hipLaunchKernelGGL(( flush_gpu), dim3(num_blocks), dim3(512), 0, 0, d_flush, d_flush2, do_write);
hipDeviceSynchronize();
}
int main(int argc, char* argv[]) {
std::vector<float> f32_array;
std::vector<half> f16_direct_array;
std::vector<half> f16_bitcarry_array;
std::vector<half> f16_shortrand_array;
std::vector<half> f16_assemblefloat_array;
float* d_f32_array;
half* d_f16_direct_array;
half* d_f16_bitcarry_array;
half* d_f16_shortrand_array;
half* d_f16_assemblefloat_array;
uint8_t* d_random_number;
std::vector<char> flush;
char* d_flush;
char* d_flush2;
int test_size = 10;
bool verbose = false;
int opt;
while ((opt = getopt(argc, argv, "n:v")) != -1) {
switch (opt) {
case 'n':
test_size = atoi(optarg);
break;
case 'v':
verbose = true;
break;
}
}
std::cout << "Start stochastic algorithm tests with test_size = " << test_size
<< std::endl;
constexpr int cache_size = 40 * 1024 * 1024; // A100 40MB L2 cache
f32_array.reserve(test_size);
f16_direct_array.reserve(test_size);
f16_bitcarry_array.reserve(test_size);
f16_shortrand_array.reserve(test_size);
f16_assemblefloat_array.reserve(test_size);
hipMalloc(&d_f32_array, test_size * sizeof(float));
hipMalloc(&d_f16_direct_array, test_size * sizeof(half));
hipMalloc(&d_f16_bitcarry_array, test_size * sizeof(half));
hipMalloc(&d_f16_shortrand_array, test_size * sizeof(half));
hipMalloc(&d_f16_assemblefloat_array, test_size * sizeof(half));
hipMalloc(&d_random_number, test_size * sizeof(uint8_t));
flush.assign(cache_size, 255);
hipMalloc(&d_flush, cache_size * sizeof(char));
hipMalloc(&d_flush2, cache_size * sizeof(char));
gen_data(d_f32_array, test_size);
gen_8bit_random(d_random_number, test_size);
constexpr int block_size = 128;
const int num_blocks = (test_size + block_size - 1) / block_size;
flush_cache(flush, d_flush, d_flush2, cache_size);
std::cout << "Starting algorithm direct..." << std::endl;
auto start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( convert_float_to_half_direct), dim3(num_blocks), dim3(block_size), 0, 0,
d_f16_direct_array, d_f32_array, test_size);
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
hipError_t e = hipGetLastError();
if (e != hipSuccess) {
std::cout << "Cuda failure: " << hipGetErrorString(e) << std::endl;
exit(-1);
}
std::chrono::duration<double> time = end - start;
std::cout << "Direct stochastic algorithm runs: " << time.count() << " sec "
<< std::endl;
flush_cache(flush, d_flush, d_flush2, cache_size);
std::cout << "Starting algorithm bitcarry..." << std::endl;
start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( convert_float_to_half_bitcarry), dim3(num_blocks), dim3(block_size), 0, 0,
d_f16_bitcarry_array, d_f32_array, test_size);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
e = hipGetLastError();
if (e != hipSuccess) {
std::cout << "Cuda failure: " << hipGetErrorString(e) << std::endl;
exit(-1);
}
time = end - start;
std::cout << "Bitcarry stochastic algorithm runs: " << time.count() << " sec"
<< std::endl;
flush_cache(flush, d_flush, d_flush2, cache_size);
std::cout << "Starting algorithm shortrand..." << std::endl;
start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( convert_float_to_half_shortrand), dim3(num_blocks), dim3(block_size), 0, 0,
d_f16_shortrand_array, d_f32_array, d_random_number, test_size);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
e = hipGetLastError();
if (e != hipSuccess) {
std::cout << "Cuda failure: " << hipGetErrorString(e) << std::endl;
exit(-1);
}
time = end - start;
std::cout << "Shortrand stochastic algorithm runs: " << time.count() << " sec"
<< std::endl;
flush_cache(flush, d_flush, d_flush2, cache_size);
std::cout << "Starting algorithm assemblefloat..." << std::endl;
start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( convert_float_to_half_assemblefloat), dim3(num_blocks), dim3(block_size), 0, 0,
d_f16_assemblefloat_array, d_f32_array, d_random_number, test_size);
hipDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
e = hipGetLastError();
if (e != hipSuccess) {
std::cout << "Cuda failure: " << hipGetErrorString(e) << std::endl;
exit(-1);
}
time = end - start;
std::cout << "Assemblefloat stochastic algorithm runs: " << time.count()
<< " sec" << std::endl;
if (verbose) {
hipMemcpy(
f32_array.data(),
d_f32_array,
test_size * sizeof(float),
hipMemcpyDeviceToHost);
hipMemcpy(
f16_direct_array.data(),
d_f16_direct_array,
test_size * sizeof(half),
hipMemcpyDeviceToHost);
hipMemcpy(
f16_bitcarry_array.data(),
d_f16_bitcarry_array,
test_size * sizeof(half),
hipMemcpyDeviceToHost);
hipMemcpy(
f16_shortrand_array.data(),
d_f16_shortrand_array,
test_size * sizeof(half),
hipMemcpyDeviceToHost);
hipMemcpy(
f16_assemblefloat_array.data(),
d_f16_assemblefloat_array,
test_size * sizeof(half),
hipMemcpyDeviceToHost);
for (int i = 0; i < test_size; i++) {
std::cout << std::hexfloat << f32_array[i] << ":\t(up:" << std::hexfloat
<< __half2float(__float2half_ru(f32_array[i]))
<< "\tdown:" << std::hexfloat
<< __half2float(__float2half_rd(f32_array[i]))
<< ") \tdirect: " << std::hexfloat
<< __half2float(f16_direct_array[i])
<< "\tbitcarry: " << std::hexfloat
<< __half2float(f16_bitcarry_array[i])
<< " \tshortrand: " << std::hexfloat
<< __half2float(f16_shortrand_array[i])
<< " \tassemblefloat: " << std::hexfloat
<< __half2float(f16_assemblefloat_array[i]) << std::endl;
}
}
hipFree(d_f32_array);
hipFree(d_f16_direct_array);
hipFree(d_f16_bitcarry_array);
hipFree(d_f16_shortrand_array);
hipFree(d_f16_assemblefloat_array);
hipFree(d_random_number);
hipFree(d_flush);
hipFree(d_flush2);
return 0;
}
|
54e41db25c9db4884532c58611e057d5f797aaf9.cu
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <cuda.h>
#include <cuda_fp16.h>
#include <curand.h>
#include <curand_kernel.h>
#include <unistd.h>
#include <chrono>
#include <iostream>
#include <vector>
__device__ half float_to_sto_half_direct(float w) {
curandState_t state;
curand_init((unsigned long long)(w * 100), 0, 0, &state);
half up = __float2half_ru(w);
half down = __float2half_rd(w);
const float up_f32 = __half2float(up);
const float down_f32 = __half2float(down);
// 1 - (w - w_down) / (w_up - w_down) = (w_up - w) / (w_up - w_down) = n / m
const float m = (up_f32 - down_f32);
const float rand = curand_uniform(&state);
if (__float_as_uint(m) == 0) {
return up;
}
const float n = (up_f32 - w);
return rand > n / m ? up : down;
}
// Returns X with its mantissa bits cleared, i.e. +/-2^floor(log2|X|), computed
// via a float rounding trick; float_to_sto_half_bitcarry below uses it to scale
// its random offset to w's magnitude.
__device__ float two_to_e(float X) {
const float Y = 16777216 * X; // 2^24 * X
const float U = ((Y + X) - Y) * 0.5; // rounding of (Y + X) keeps only X's leading power of two
return U == 0 ? X : U;
}
__device__ half float_to_sto_half_bitcarry(float w) {
curandState_t state;
curand_init((unsigned long long)(w * 100), 0, 0, &state);
float rand = curand_uniform(&state);
float rand_match_w = two_to_e(w) * rand * 0.0009765625; // 2^(-10)
float Z = w + rand_match_w;
return __float2half_rz(Z);
}
__device__ half float_to_sto_half_shortrand(float w, uint8_t rand) {
const unsigned w_int = __float_as_uint(w);
const unsigned w_new = w_int + (rand << 5);
return __float2half_rz(__uint_as_float(w_new));
}
__device__ half float_to_sto_half_assemblefloat(float w, uint8_t rand) {
const unsigned w_int = __float_as_uint(w);
const unsigned assmebles = (w_int & 0xff800000) | (rand << 5);
const unsigned subtract = (w_int & 0xff800000);
const float assmeble_float =
__uint_as_float(assmebles) - __uint_as_float(subtract);
return __float2half_rz(w + assmeble_float);
}
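// [Editor's sketch, not part of the original benchmark] The four conversion
// routines above all approximate the same stochastic-rounding rule: round w up
// to the next representable half value with probability
// (w - w_down) / (w_up - w_down), which makes the rounding unbiased in
// expectation. This host-side helper (the name round_up_probability is ours,
// for illustration only) restates that rule for a given pair of neighbours.
inline float round_up_probability(float w, float w_down, float w_up) {
const float gap = w_up - w_down; // distance between the two half neighbours
return gap == 0.f ? 0.f : (w - w_down) / gap; // exactly representable -> never rounds up
}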
__global__ void convert_float_to_half_direct(half* dst, float* src, int size) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
dst[idx] = float_to_sto_half_direct(src[idx]);
}
}
__global__ void
convert_float_to_half_bitcarry(half* dst, float* src, int size) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
dst[idx] = float_to_sto_half_bitcarry(src[idx]);
}
}
__global__ void
convert_float_to_half_shortrand(half* dst, float* src, uint8_t* r, int size) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
dst[idx] = float_to_sto_half_shortrand(src[idx], r[idx]);
}
}
__global__ void convert_float_to_half_assemblefloat(
half* dst,
float* src,
uint8_t* r,
int size) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
dst[idx] = float_to_sto_half_assemblefloat(src[idx], r[idx]);
}
}
void gen_data(float* d_f32_array, int test_size) {
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); // Random seed
curandGenerateUniform(gen, d_f32_array, test_size);
curandDestroyGenerator(gen);
cudaDeviceSynchronize();
}
// generate 64bit random number and then copy back to 8bit memory
void gen_8bit_random(uint8_t* d_random_number, int test_size) {
curandGenerator_t gen;
unsigned* d_random_number_f32;
cudaMalloc(
&d_random_number_f32,
(test_size / sizeof(unsigned) + 1) * sizeof(unsigned));
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937);
curandSetPseudoRandomGeneratorSeed(gen, 5678ULL); // Random seed
curandGenerate(gen, d_random_number_f32, (test_size / sizeof(unsigned) + 1));
cudaMemcpy(
d_random_number,
d_random_number_f32,
test_size * sizeof(uint8_t),
cudaMemcpyDeviceToDevice);
curandDestroyGenerator(gen);
cudaFree(d_random_number_f32);
}
__global__ void flush_gpu(char* d_flush, char* d_flush2, bool do_write) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const char val = d_flush[idx];
if (do_write * val) {
d_flush2[idx] = val;
}
}
void flush_cache(
std::vector<char> flush,
char* d_flush,
char* d_flush2,
int cache_size,
bool do_write = false) {
cudaMemcpy(d_flush, flush.data(), cache_size, cudaMemcpyHostToDevice);
const unsigned num_blocks = cache_size / 512;
flush_gpu<<<num_blocks, 512>>>(d_flush, d_flush2, do_write);
cudaDeviceSynchronize();
}
int main(int argc, char* argv[]) {
std::vector<float> f32_array;
std::vector<half> f16_direct_array;
std::vector<half> f16_bitcarry_array;
std::vector<half> f16_shortrand_array;
std::vector<half> f16_assemblefloat_array;
float* d_f32_array;
half* d_f16_direct_array;
half* d_f16_bitcarry_array;
half* d_f16_shortrand_array;
half* d_f16_assemblefloat_array;
uint8_t* d_random_number;
std::vector<char> flush;
char* d_flush;
char* d_flush2;
int test_size = 10;
bool verbose = false;
int opt;
while ((opt = getopt(argc, argv, "n:v")) != -1) {
switch (opt) {
case 'n':
test_size = atoi(optarg);
break;
case 'v':
verbose = true;
break;
}
}
std::cout << "Start stochastic algorithm tests with test_size = " << test_size
<< std::endl;
constexpr int cache_size = 40 * 1024 * 1024; // A100 40MB L2 cache
// resize (not just reserve) so the device-to-host copies and operator[] in the
// verbose dump below access constructed elements
f32_array.resize(test_size);
f16_direct_array.resize(test_size);
f16_bitcarry_array.resize(test_size);
f16_shortrand_array.resize(test_size);
f16_assemblefloat_array.resize(test_size);
cudaMalloc(&d_f32_array, test_size * sizeof(float));
cudaMalloc(&d_f16_direct_array, test_size * sizeof(half));
cudaMalloc(&d_f16_bitcarry_array, test_size * sizeof(half));
cudaMalloc(&d_f16_shortrand_array, test_size * sizeof(half));
cudaMalloc(&d_f16_assemblefloat_array, test_size * sizeof(half));
cudaMalloc(&d_random_number, test_size * sizeof(uint8_t));
flush.assign(cache_size, 255);
cudaMalloc(&d_flush, cache_size * sizeof(char));
cudaMalloc(&d_flush2, cache_size * sizeof(char));
gen_data(d_f32_array, test_size);
gen_8bit_random(d_random_number, test_size);
constexpr int block_size = 128;
const int num_blocks = (test_size + block_size - 1) / block_size;
flush_cache(flush, d_flush, d_flush2, cache_size);
std::cout << "Starting algorithm direct..." << std::endl;
auto start = std::chrono::high_resolution_clock::now();
convert_float_to_half_direct<<<num_blocks, block_size>>>(
d_f16_direct_array, d_f32_array, test_size);
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess) {
std::cout << "Cuda failure: " << cudaGetErrorString(e) << std::endl;
exit(-1);
}
std::chrono::duration<double> time = end - start;
std::cout << "Direct stochastic algorithm runs: " << time.count() << " sec "
<< std::endl;
flush_cache(flush, d_flush, d_flush2, cache_size);
std::cout << "Starting algorithm bitcarry..." << std::endl;
start = std::chrono::high_resolution_clock::now();
convert_float_to_half_bitcarry<<<num_blocks, block_size>>>(
d_f16_bitcarry_array, d_f32_array, test_size);
cudaDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
e = cudaGetLastError();
if (e != cudaSuccess) {
std::cout << "Cuda failure: " << cudaGetErrorString(e) << std::endl;
exit(-1);
}
time = end - start;
std::cout << "Bitcarry stochastic algorithm runs: " << time.count() << " sec"
<< std::endl;
flush_cache(flush, d_flush, d_flush2, cache_size);
std::cout << "Starting algorithm shortrand..." << std::endl;
start = std::chrono::high_resolution_clock::now();
convert_float_to_half_shortrand<<<num_blocks, block_size>>>(
d_f16_shortrand_array, d_f32_array, d_random_number, test_size);
cudaDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
e = cudaGetLastError();
if (e != cudaSuccess) {
std::cout << "Cuda failure: " << cudaGetErrorString(e) << std::endl;
exit(-1);
}
time = end - start;
std::cout << "Shortrand stochastic algorithm runs: " << time.count() << " sec"
<< std::endl;
flush_cache(flush, d_flush, d_flush2, cache_size);
std::cout << "Starting algorithm assemblefloat..." << std::endl;
start = std::chrono::high_resolution_clock::now();
convert_float_to_half_assemblefloat<<<num_blocks, block_size>>>(
d_f16_assemblefloat_array, d_f32_array, d_random_number, test_size);
cudaDeviceSynchronize();
end = std::chrono::high_resolution_clock::now();
e = cudaGetLastError();
if (e != cudaSuccess) {
std::cout << "Cuda failure: " << cudaGetErrorString(e) << std::endl;
exit(-1);
}
time = end - start;
std::cout << "Assemblefloat stochastic algorithm runs: " << time.count()
<< " sec" << std::endl;
if (verbose) {
cudaMemcpy(
f32_array.data(),
d_f32_array,
test_size * sizeof(float),
cudaMemcpyDeviceToHost);
cudaMemcpy(
f16_direct_array.data(),
d_f16_direct_array,
test_size * sizeof(half),
cudaMemcpyDeviceToHost);
cudaMemcpy(
f16_bitcarry_array.data(),
d_f16_bitcarry_array,
test_size * sizeof(half),
cudaMemcpyDeviceToHost);
cudaMemcpy(
f16_shortrand_array.data(),
d_f16_shortrand_array,
test_size * sizeof(half),
cudaMemcpyDeviceToHost);
cudaMemcpy(
f16_assemblefloat_array.data(),
d_f16_assemblefloat_array,
test_size * sizeof(half),
cudaMemcpyDeviceToHost);
for (int i = 0; i < test_size; i++) {
std::cout << std::hexfloat << f32_array[i] << ":\t(up:" << std::hexfloat
<< __half2float(__float2half_ru(f32_array[i]))
<< "\tdown:" << std::hexfloat
<< __half2float(__float2half_rd(f32_array[i]))
<< ") \tdirect: " << std::hexfloat
<< __half2float(f16_direct_array[i])
<< "\tbitcarry: " << std::hexfloat
<< __half2float(f16_bitcarry_array[i])
<< " \tshortrand: " << std::hexfloat
<< __half2float(f16_shortrand_array[i])
<< " \tassemblefloat: " << std::hexfloat
<< __half2float(f16_assemblefloat_array[i]) << std::endl;
}
}
cudaFree(d_f32_array);
cudaFree(d_f16_direct_array);
cudaFree(d_f16_bitcarry_array);
cudaFree(d_f16_shortrand_array);
cudaFree(d_f16_assemblefloat_array);
cudaFree(d_random_number);
cudaFree(d_flush);
cudaFree(d_flush2);
return 0;
}
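// [Editor's note, hedged] A plausible way to build and run this standalone
// benchmark, assuming a CUDA toolchain with the cuRAND host library available
// (the output name is arbitrary):
//   nvcc -O3 -o stochastic_rounding_test 54e41db25c9db4884532c58611e057d5f797aaf9.cu -lcurand
//   ./stochastic_rounding_test -n 100000000      (add -v to dump per-element results)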
|
b1db93caf27e519b65bea5fab18f5e96aa5efbd2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void vecAdd(int *A, int *B, int *C) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
C[i] = A[i] + B[i]; // element-wise vector add
}
|
b1db93caf27e519b65bea5fab18f5e96aa5efbd2.cu
|
#include "includes.h"
__global__ void vecAdd(int *A, int *B, int *C) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
C[i] = A[i] + B[i]; // element-wise vector add
}
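// [Editor's sketch, not part of the original file] A minimal host-side driver
// showing one way the kernel above could be launched; the helper name
// run_vec_add and the block size of 256 are assumptions made for illustration.
void run_vec_add(const int* hA, const int* hB, int* hC, int n) {
int *dA, *dB, *dC;
const size_t bytes = n * sizeof(int);
cudaMalloc(&dA, bytes);
cudaMalloc(&dB, bytes);
cudaMalloc(&dC, bytes);
cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, bytes, cudaMemcpyHostToDevice);
const int block = 256;
// note: vecAdd has no bounds check, so n should be a multiple of block
// to avoid out-of-range accesses
const int grid = (n + block - 1) / block;
vecAdd<<<grid, block>>>(dA, dB, dC);
cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
}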
|
39e410c5e147656337cefdc8b114be50873cb6f6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <vector>
#include "caffe/layers/activation/sigmoid_layer.hpp"
namespace caffe {
static __global__ void SigmoidForward(const int n, const float* in, float* out)
{
CUDA_KERNEL_LOOP(index, n)
{
out[index] = 1. / (1. + exp(-in[index]));
}
}
void SigmoidLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
const float* bottom_data = bottom[0]->gpu_data();
float* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidForward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
static __global__ void SigmoidBackward(const int n, const float* in_diff, const float* out_data, float* out_diff)
{
CUDA_KERNEL_LOOP(index, n)
{
const float sigmoid_x = out_data[index];
out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);
}
}
void SigmoidLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
const float* top_data = top[0]->gpu_data();
const float* top_diff = top[0]->gpu_diff();
float* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidBackward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
void SigmoidLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
}
} // namespace caffe
|
39e410c5e147656337cefdc8b114be50873cb6f6.cu
|
#include <cmath>
#include <vector>
#include "caffe/layers/activation/sigmoid_layer.hpp"
namespace caffe {
static __global__ void SigmoidForward(const int n, const float* in, float* out)
{
CUDA_KERNEL_LOOP(index, n)
{
out[index] = 1. / (1. + exp(-in[index]));
}
}
void SigmoidLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
const float* bottom_data = bottom[0]->gpu_data();
float* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
(count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
static __global__ void SigmoidBackward(const int n, const float* in_diff, const float* out_data, float* out_diff)
{
CUDA_KERNEL_LOOP(index, n)
{
const float sigmoid_x = out_data[index];
out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x);
}
}
void SigmoidLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
const float* top_data = top[0]->gpu_data();
const float* top_diff = top[0]->gpu_diff();
float* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidBackward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
(count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
void SigmoidLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
}
} // namespace caffe
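// [Editor's note, illustrative only] Backward_gpu above relies on the identity
// d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)); this standalone helper
// (the name sigmoid_grad_from_output is ours) restates it for a single value
// already expressed in terms of the forward output.
static inline float sigmoid_grad_from_output(float sigmoid_x) {
return sigmoid_x * (1.f - sigmoid_x);
}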
|
fa5efe060a6cc70b57630f9ad5f347dccf6ee1b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include "model/gpt_encoder.h"
#include "proto/gpt_weight.h"
#include "tools/util.h"
#ifdef FP16_MODE
const lightseq::cuda::OperationType gpt_optype =
lightseq::cuda::OperationType::FP16;
#else
const lightseq::cuda::OperationType gpt_optype =
lightseq::cuda::OperationType::FP32;
#endif
namespace py = pybind11;
namespace lightseq {
namespace cuda {
class Gpt {
private:
typedef lightseq::cuda::OperationTypeTraits<gpt_optype> optraits;
lightseq::cuda::GptEncoder<gpt_optype>* encoder_;
int* d_input_;
int* d_sample_id;
float* d_ppl;
int _max_batch_size;
hipStream_t stream_;
hipStream_t cache_stream_;
hipblasHandle_t hd_;
lightseq::cuda::GptWeight<gpt_optype> tw_;
std::set<std::string> available_sampling_methods = {"topk", "topp"};
public:
Gpt(const std::string weight_path, const int max_batch_size,
const int max_step = 50)
: stream_(nullptr), hd_(nullptr), encoder_(nullptr) {
/* ---step1. init environment--- */
_max_batch_size = max_batch_size;
hipError_t cuerr = hipSetDevice(0);
if (cuerr != hipSuccess) {
throw std::runtime_error(hipGetErrorString(cuerr));
}
cuerr = hipStreamCreate(&stream_);
if (cuerr != hipSuccess) {
throw std::runtime_error(hipGetErrorString(cuerr));
}
cuerr = hipStreamCreate(&cache_stream_);
if (cuerr != hipSuccess) {
throw std::runtime_error(hipGetErrorString(cuerr));
}
hipblasStatus_t cublaserr = hipblasCreate(&hd_);
if (cublaserr != HIPBLAS_STATUS_SUCCESS) {
throw std::runtime_error("Failed to create cublas handle");
}
cublaserr = hipblasSetStream(hd_, stream_);
if (cublaserr != HIPBLAS_STATUS_SUCCESS) {
throw std::runtime_error("Failed to set stream for cublas handle");
}
/* ---step2. load model weights into GPU memory--- */
// saved in custom proto file
std::string model_weights_path = weight_path;
std::string res = tw_.initializing(model_weights_path);
if (!res.empty()) {
throw std::runtime_error(res);
}
// set max_step before buffer init
tw_._max_step = max_step;
/*
step3. instantiate gpt encoder, init the gpu memory buffer.
using thrust vector to avoid managing gpu memory by hand
*/
// register device memory for inputs and outputs
lightseq::cuda::CHECK_GPU_ERROR(
hipMalloc(&d_input_, _max_batch_size * tw_._max_step * sizeof(int)));
lightseq::cuda::CHECK_GPU_ERROR(hipMalloc(
&d_sample_id, _max_batch_size * tw_._max_step * sizeof(int)));
lightseq::cuda::CHECK_GPU_ERROR(
hipMalloc(&d_ppl, _max_batch_size * sizeof(float)));
encoder_ = new lightseq::cuda::GptEncoder<gpt_optype>(
max_batch_size, d_input_, d_ppl, d_sample_id, tw_, stream_,
cache_stream_, hd_);
res = encoder_->check();
if (!res.empty()) {
throw std::runtime_error(res);
}
size_t buf_bytesize = encoder_->compute_buffer_bytesize();
std::cout << "gpt2 buf_bytesize: " << buf_bytesize << std::endl;
void* d_buf_;
// encoder and decoder use the same buffer to save gpu memory usage
lightseq::cuda::CHECK_GPU_ERROR(
hipMalloc((void**)&d_buf_, (size_t)buf_bytesize));
encoder_->init_buffer(d_buf_);
cuerr = hipStreamSynchronize(stream_);
if (cuerr != hipSuccess) {
std::cout << "Failed to init GPU for transformer" << std::endl;
throw std::runtime_error(std::string(hipGetErrorString(cuerr)));
}
}
py::array_t<float> ppl(
py::array_t<int, py::array::c_style | py::array::forcecast> input_seq) {
auto input_seq_out = input_seq.mutable_unchecked<2>();
const int* input_seq_data = input_seq_out.data(0, 0);
int batch_size = input_seq_out.shape(0);
int batch_seq_len = input_seq_out.shape(1);
if (batch_size > _max_batch_size) {
throw std::runtime_error(
"batch size of input greater than max_batch_size");
}
if (batch_seq_len > tw_._max_step) {
throw std::runtime_error("seq len of input greater than max_step");
}
lightseq::cuda::CHECK_GPU_ERROR(hipMemcpyAsync(
d_input_, input_seq_data, sizeof(int) * input_seq_out.size(),
hipMemcpyHostToDevice, stream_));
encoder_->run_one_infer(batch_size, batch_seq_len);
auto probs = py::array_t<float>(batch_size);
float* probs_data = probs.mutable_data(0);
lightseq::cuda::CHECK_GPU_ERROR(hipMemcpy(probs_data, d_ppl,
sizeof(float) * probs.size(),
hipMemcpyDeviceToHost));
return probs;
}
py::array_t<int> sample(
py::array_t<int, py::array::c_style | py::array::forcecast> input_seq,
std::string sampling_method = "topk", const int topk = 1,
const float topp = 0.75) {
if (available_sampling_methods.find(sampling_method) !=
available_sampling_methods.end()) {
tw_._sampling_method = sampling_method;
}
assert(topk >= 0);
tw_._topk = topk;
assert(topp >= 0.0 && topp <= 1.0);
tw_._topp = topp;
auto input_seq_out = input_seq.mutable_unchecked<2>();
const int* input_seq_data = input_seq_out.data(0, 0);
int batch_size = input_seq_out.shape(0);
int batch_seq_len = input_seq_out.shape(1);
if (batch_size > _max_batch_size) {
throw std::runtime_error(
"batch size of input greater than max_batch_size");
}
if (batch_seq_len > tw_._max_step) {
throw std::runtime_error("seq len of input greater than max_step");
}
lightseq::cuda::CHECK_GPU_ERROR(hipMemcpyAsync(
d_input_, input_seq_data, sizeof(int) * input_seq_out.size(),
hipMemcpyHostToDevice, stream_));
int sampled_seq_len = encoder_->run_one_sample(batch_size, batch_seq_len);
auto tokens = py::array_t<int>({batch_size, sampled_seq_len});
int* tokens_data = tokens.mutable_data(0);
lightseq::cuda::CHECK_GPU_ERROR(hipMemcpy(tokens_data, d_sample_id,
sizeof(int) * tokens.size(),
hipMemcpyDeviceToHost));
return tokens;
}
};
} // namespace cuda
} // namespace lightseq
|
fa5efe060a6cc70b57630f9ad5f347dccf6ee1b5.cu
|
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include "model/gpt_encoder.h"
#include "proto/gpt_weight.h"
#include "tools/util.h"
#ifdef FP16_MODE
const lightseq::cuda::OperationType gpt_optype =
lightseq::cuda::OperationType::FP16;
#else
const lightseq::cuda::OperationType gpt_optype =
lightseq::cuda::OperationType::FP32;
#endif
namespace py = pybind11;
namespace lightseq {
namespace cuda {
class Gpt {
private:
typedef lightseq::cuda::OperationTypeTraits<gpt_optype> optraits;
lightseq::cuda::GptEncoder<gpt_optype>* encoder_;
int* d_input_;
int* d_sample_id;
float* d_ppl;
int _max_batch_size;
cudaStream_t stream_;
cudaStream_t cache_stream_;
cublasHandle_t hd_;
lightseq::cuda::GptWeight<gpt_optype> tw_;
std::set<std::string> available_sampling_methods = {"topk", "topp"};
public:
Gpt(const std::string weight_path, const int max_batch_size,
const int max_step = 50)
: stream_(nullptr), hd_(nullptr), encoder_(nullptr) {
/* ---step1. init environment--- */
_max_batch_size = max_batch_size;
cudaError_t cuerr = cudaSetDevice(0);
if (cuerr != cudaSuccess) {
throw std::runtime_error(cudaGetErrorString(cuerr));
}
cuerr = cudaStreamCreate(&stream_);
if (cuerr != cudaSuccess) {
throw std::runtime_error(cudaGetErrorString(cuerr));
}
cuerr = cudaStreamCreate(&cache_stream_);
if (cuerr != cudaSuccess) {
throw std::runtime_error(cudaGetErrorString(cuerr));
}
cublasStatus_t cublaserr = cublasCreate(&hd_);
if (cublaserr != CUBLAS_STATUS_SUCCESS) {
throw std::runtime_error("Failed to create cublas handle");
}
cublaserr = cublasSetStream(hd_, stream_);
if (cublaserr != CUBLAS_STATUS_SUCCESS) {
throw std::runtime_error("Failed to set stream for cublas handle");
}
/* ---step2. load model weights into GPU memory--- */
// saved in custom proto file
std::string model_weights_path = weight_path;
std::string res = tw_.initializing(model_weights_path);
if (!res.empty()) {
throw std::runtime_error(res);
}
// set max_step before buffer init
tw_._max_step = max_step;
/*
step3. instantiate gpt encoder, init the gpu memory buffer.
using thrust vector to avoid managing gpu memory by hand
*/
// register device memory for inputs and outputs
lightseq::cuda::CHECK_GPU_ERROR(
cudaMalloc(&d_input_, _max_batch_size * tw_._max_step * sizeof(int)));
lightseq::cuda::CHECK_GPU_ERROR(cudaMalloc(
&d_sample_id, _max_batch_size * tw_._max_step * sizeof(int)));
lightseq::cuda::CHECK_GPU_ERROR(
cudaMalloc(&d_ppl, _max_batch_size * sizeof(float)));
encoder_ = new lightseq::cuda::GptEncoder<gpt_optype>(
max_batch_size, d_input_, d_ppl, d_sample_id, tw_, stream_,
cache_stream_, hd_);
res = encoder_->check();
if (!res.empty()) {
throw std::runtime_error(res);
}
size_t buf_bytesize = encoder_->compute_buffer_bytesize();
std::cout << "gpt2 buf_bytesize: " << buf_bytesize << std::endl;
void* d_buf_;
// encoder and decoder use the same buffer to save gpu memory usage
lightseq::cuda::CHECK_GPU_ERROR(
cudaMalloc((void**)&d_buf_, (size_t)buf_bytesize));
encoder_->init_buffer(d_buf_);
cuerr = cudaStreamSynchronize(stream_);
if (cuerr != cudaSuccess) {
std::cout << "Failed to init GPU for transformer" << std::endl;
throw std::runtime_error(std::string(cudaGetErrorString(cuerr)));
}
}
py::array_t<float> ppl(
py::array_t<int, py::array::c_style | py::array::forcecast> input_seq) {
auto input_seq_out = input_seq.mutable_unchecked<2>();
const int* input_seq_data = input_seq_out.data(0, 0);
int batch_size = input_seq_out.shape(0);
int batch_seq_len = input_seq_out.shape(1);
if (batch_size > _max_batch_size) {
throw std::runtime_error(
"batch size of input greater than max_batch_size");
}
if (batch_seq_len > tw_._max_step) {
throw std::runtime_error("seq len of input greater than max_step");
}
lightseq::cuda::CHECK_GPU_ERROR(cudaMemcpyAsync(
d_input_, input_seq_data, sizeof(int) * input_seq_out.size(),
cudaMemcpyHostToDevice, stream_));
encoder_->run_one_infer(batch_size, batch_seq_len);
auto probs = py::array_t<float>(batch_size);
float* probs_data = probs.mutable_data(0);
lightseq::cuda::CHECK_GPU_ERROR(cudaMemcpy(probs_data, d_ppl,
sizeof(float) * probs.size(),
cudaMemcpyDeviceToHost));
return probs;
}
py::array_t<int> sample(
py::array_t<int, py::array::c_style | py::array::forcecast> input_seq,
std::string sampling_method = "topk", const int topk = 1,
const float topp = 0.75) {
if (available_sampling_methods.find(sampling_method) !=
available_sampling_methods.end()) {
tw_._sampling_method = sampling_method;
}
assert(topk >= 0);
tw_._topk = topk;
assert(topp >= 0.0 && topp <= 1.0);
tw_._topp = topp;
auto input_seq_out = input_seq.mutable_unchecked<2>();
const int* input_seq_data = input_seq_out.data(0, 0);
int batch_size = input_seq_out.shape(0);
int batch_seq_len = input_seq_out.shape(1);
if (batch_size > _max_batch_size) {
throw std::runtime_error(
"batch size of input greater than max_batch_size");
}
if (batch_seq_len > tw_._max_step) {
throw std::runtime_error("seq len of input greater than max_step");
}
lightseq::cuda::CHECK_GPU_ERROR(cudaMemcpyAsync(
d_input_, input_seq_data, sizeof(int) * input_seq_out.size(),
cudaMemcpyHostToDevice, stream_));
int sampled_seq_len = encoder_->run_one_sample(batch_size, batch_seq_len);
auto tokens = py::array_t<int>({batch_size, sampled_seq_len});
int* tokens_data = tokens.mutable_data(0);
lightseq::cuda::CHECK_GPU_ERROR(cudaMemcpy(tokens_data, d_sample_id,
sizeof(int) * tokens.size(),
cudaMemcpyDeviceToHost));
return tokens;
}
};
} // namespace cuda
} // namespace lightseq
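// [Editor's sketch, not the project's actual binding code] One plausible way
// this class could be exposed to Python with pybind11; the module name
// lightseq_gpt and the keyword-argument names are assumptions mirroring the
// C++ signatures above.
PYBIND11_MODULE(lightseq_gpt, m) {
py::class_<lightseq::cuda::Gpt>(m, "Gpt")
.def(py::init<std::string, int, int>(), py::arg("weight_path"),
py::arg("max_batch_size"), py::arg("max_step") = 50)
.def("ppl", &lightseq::cuda::Gpt::ppl, py::arg("input_seq"))
.def("sample", &lightseq::cuda::Gpt::sample, py::arg("input_seq"),
py::arg("sampling_method") = "topk", py::arg("topk") = 1,
py::arg("topp") = 0.75);
}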
|
1ed2e0ffd95594b885e22a51a2093593469ce863.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
__global__
void fuse_conv_batchnorm_bias_kernel(int volume,
DATATYPE* dst_ptr,
DATATYPE* scale,
DATATYPE* beta,
DATATYPE* mean,
DATATYPE* var)
{
// int i = blockIdx.x * blockDim.x + threadIdx.x;
CUDA_KERNEL_LOOP(i, volume)
{
dst_ptr[i] = beta[i] - scale[i] * mean[i] / sqrt(var[i] + CUDNN_BN_MIN_EPSILON);
}
}
void FuseConvBatchNormBias::map(void)
{
assert(inputs[0].numDim == 1);
assert(inputs[1].numDim == 1);
assert(inputs[2].numDim == 1);
assert(inputs[3].numDim == 1);
size_t outputSize = sizeof(DATATYPE) * outputs[0].volume();
checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize));
}
void FuseConvBatchNormBias::unmap(void)
{
checkCUDA(hipFree(outputs[0].data_ptr));
}
void FuseConvBatchNormBias::forward(bool block)
{
int volume = outputs[0].volume();
DATATYPE* scale_ptr = (DATATYPE*) inputs[0].data_ptr;
DATATYPE* beta_ptr = (DATATYPE*) inputs[1].data_ptr;
DATATYPE* mean_ptr = (DATATYPE*) inputs[2].data_ptr;
DATATYPE* var_ptr = (DATATYPE*) inputs[3].data_ptr;
hipLaunchKernelGGL(( fuse_conv_batchnorm_bias_kernel), dim3(GET_BLOCKS(outputs[0].volume())), dim3(CUDA_NUM_THREADS), 0, 0,
volume, (DATATYPE*)outputs[0].data_ptr, scale_ptr, beta_ptr, mean_ptr, var_ptr);
if (block)
checkCUDA(hipDeviceSynchronize());
}
|
1ed2e0ffd95594b885e22a51a2093593469ce863.cu
|
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
__global__
void fuse_conv_batchnorm_bias_kernel(int volume,
DATATYPE* dst_ptr,
DATATYPE* scale,
DATATYPE* beta,
DATATYPE* mean,
DATATYPE* var)
{
// int i = blockIdx.x * blockDim.x + threadIdx.x;
CUDA_KERNEL_LOOP(i, volume)
{
dst_ptr[i] = beta[i] - scale[i] * mean[i] / sqrt(var[i] + CUDNN_BN_MIN_EPSILON);
}
}
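// [Editor's note, derivation sketch] Batch normalization computes
//   y = scale * (x - mean) / sqrt(var + eps) + beta.
// When the scale factor is folded into the preceding convolution's weights,
// the remaining constant term becomes the fused bias that the kernel above
// writes per element:
//   bias' = beta - scale * mean / sqrt(var + eps).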
void FuseConvBatchNormBias::map(void)
{
assert(inputs[0].numDim == 1);
assert(inputs[1].numDim == 1);
assert(inputs[2].numDim == 1);
assert(inputs[3].numDim == 1);
size_t outputSize = sizeof(DATATYPE) * outputs[0].volume();
checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize));
}
void FuseConvBatchNormBias::unmap(void)
{
checkCUDA(cudaFree(outputs[0].data_ptr));
}
void FuseConvBatchNormBias::forward(bool block)
{
int volume = outputs[0].volume();
DATATYPE* scale_ptr = (DATATYPE*) inputs[0].data_ptr;
DATATYPE* beta_ptr = (DATATYPE*) inputs[1].data_ptr;
DATATYPE* mean_ptr = (DATATYPE*) inputs[2].data_ptr;
DATATYPE* var_ptr = (DATATYPE*) inputs[3].data_ptr;
fuse_conv_batchnorm_bias_kernel<<<GET_BLOCKS(outputs[0].volume()), CUDA_NUM_THREADS>>>(
volume, (DATATYPE*)outputs[0].data_ptr, scale_ptr, beta_ptr, mean_ptr, var_ptr);
if (block)
checkCUDA(cudaDeviceSynchronize());
}
|
517e941de123aa8c5aefb894c44f5d3a21a0083d.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by DY on 17-10-14.
//
#ifndef NLP_CUDA_CUMATRIX_HEADER_H
#define NLP_CUDA_CUMATRIX_HEADER_H
template <typename T>
struct CDenseMatrix;
#include "CuDenseExpr.cu"
/*
* Row major, Shadow copy
*/
template <typename T>
struct CuDenseMatrix : CuDenseExpr<T, CuDenseMatrix<T> > {
T *data;
bool needFree;
int rows, cols;
virtual ~CuDenseMatrix() {
if (needFree) {
checkCudaErrors(hipFree(data));
}
}
void initData(const T *data, int rows, int cols) {
checkCudaErrors(hipMalloc((void **) &this->data, sizeof(T) * (rows * cols)));
checkCudaErrors(hipMemcpy(this->data, data, sizeof(T) * (rows * cols), hipMemcpyHostToDevice));
}
CuDenseMatrix(const T* data, int rows, int cols) {
initData(data, rows, cols);
this->rows = rows;
this->cols = cols;
this->needFree = true;
}
CuDenseMatrix(int rows, int cols) {
checkCudaErrors(hipMalloc((void **) &this->data, sizeof(T) * (rows * cols)));
this->rows = rows;
this->cols = cols;
this->needFree = true;
}
CuDenseMatrix(const CuDenseMatrix &that) {
this->data = that.data;
this->rows = that.rows;
this->cols = that.cols;
this->needFree = false;
}
CuDenseMatrix& operator=(const CuDenseMatrix &that) {
if (this != &that) {
if (this->rows * this->cols != that.rows * that.cols) {
this->~CuDenseMatrix();
}
this->data = that.data;
this->rows = that.rows;
this->cols = that.cols;
this->needFree = true;
}
return *this;
}
CuDenseMatrix<T>& operator=(const CDenseMatrix<T>& o);
CuDenseMatrix<T>& operator=(T value) {
fillDevice(data, getNnz(), CuDenseConstExpr<T>(value));
checkCudaErrors(hipDeviceSynchronize());
return *this;
}
template <class ETYPE>
CuDenseMatrix<T>& operator=(const CuDenseExpr<T, ETYPE> &e) {
fillDevice(data, getNnz(), e);
checkCudaErrors(hipDeviceSynchronize());
return *this;
}
template <class OP, class LHS>
CuDenseMatrix& operator=(const CuDenseZipExpr<OP, LHS, T> &e) {
fillDevice(data, getNnz(), e);
checkCudaErrors(hipDeviceSynchronize());
return *this;
}
template <class E>
CuDenseMatrix& operator+=(E expr) {
return *this = *this + expr;
}
template <class E>
CuDenseMatrix& operator-=(E expr) {
return *this = *this - expr;
}
template <class E>
CuDenseMatrix& operator*=(E expr) {
return *this = *this * expr;
}
template <class E>
CuDenseMatrix& operator/=(E expr) {
return *this = *this / expr;
}
void reshape(int rows, int cols) {
assert(rows * cols == this->rows * this->cols && rows > 0 && cols > 0);
this->rows = rows;
this->cols = cols;
}
CuDenseMatrix<T> operator~();
__device__ inline
T& at(int r, int c) const {
r %= rows; // broad-casting
c %= cols;
return data[r * cols + c];
}
__device__ inline
T& at(int i) const {
return data[i];
}
__device__ __host__ inline
int nrow() const {
return rows;
}
__device__ __host__ inline
int ncol() const {
return cols;
}
__device__ __host__ inline
int getNnz() const {
return rows * cols;
}
void print(int rows = 10, int cols = 10);
};
template <typename T>
T sum(const CuDenseMatrix<T>& m) {
thrust::device_ptr<T> devicePtr(m.data);
return thrust::reduce(devicePtr, devicePtr + m.getNnz());
}
#endif //NLP_CUDA_CUMATRIX_HEADER_H
|
517e941de123aa8c5aefb894c44f5d3a21a0083d.cu
|
//
// Created by DY on 17-10-14.
//
#ifndef NLP_CUDA_CUMATRIX_HEADER_H
#define NLP_CUDA_CUMATRIX_HEADER_H
template <typename T>
struct CDenseMatrix;
#include "CuDenseExpr.cu"
/*
* Row major, Shadow copy
*/
template <typename T>
struct CuDenseMatrix : CuDenseExpr<T, CuDenseMatrix<T> > {
T *data;
bool needFree;
int rows, cols;
virtual ~CuDenseMatrix() {
if (needFree) {
checkCudaErrors(cudaFree(data));
}
}
void initData(const T *data, int rows, int cols) {
checkCudaErrors(cudaMalloc((void **) &this->data, sizeof(T) * (rows * cols)));
checkCudaErrors(cudaMemcpy(this->data, data, sizeof(T) * (rows * cols), cudaMemcpyHostToDevice));
}
CuDenseMatrix(const T* data, int rows, int cols) {
initData(data, rows, cols);
this->rows = rows;
this->cols = cols;
this->needFree = true;
}
CuDenseMatrix(int rows, int cols) {
checkCudaErrors(cudaMalloc((void **) &this->data, sizeof(T) * (rows * cols)));
this->rows = rows;
this->cols = cols;
this->needFree = true;
}
CuDenseMatrix(const CuDenseMatrix &that) {
this->data = that.data;
this->rows = that.rows;
this->cols = that.cols;
this->needFree = false;
}
CuDenseMatrix& operator=(const CuDenseMatrix &that) {
if (this != &that) {
if (this->rows * this->cols != that.rows * that.cols) {
this->~CuDenseMatrix();
}
this->data = that.data;
this->rows = that.rows;
this->cols = that.cols;
this->needFree = true;
}
return *this;
}
CuDenseMatrix<T>& operator=(const CDenseMatrix<T>& o);
CuDenseMatrix<T>& operator=(T value) {
fillDevice(data, getNnz(), CuDenseConstExpr<T>(value));
checkCudaErrors(cudaDeviceSynchronize());
return *this;
}
template <class ETYPE>
CuDenseMatrix<T>& operator=(const CuDenseExpr<T, ETYPE> &e) {
fillDevice(data, getNnz(), e);
checkCudaErrors(cudaDeviceSynchronize());
return *this;
}
template <class OP, class LHS>
CuDenseMatrix& operator=(const CuDenseZipExpr<OP, LHS, T> &e) {
fillDevice(data, getNnz(), e);
checkCudaErrors(cudaDeviceSynchronize());
return *this;
}
template <class E>
CuDenseMatrix& operator+=(E expr) {
return *this = *this + expr;
}
template <class E>
CuDenseMatrix& operator-=(E expr) {
return *this = *this - expr;
}
template <class E>
CuDenseMatrix& operator*=(E expr) {
return *this = *this * expr;
}
template <class E>
CuDenseMatrix& operator/=(E expr) {
return *this = *this / expr;
}
void reshape(int rows, int cols) {
assert(rows * cols == this->rows * this->cols && rows > 0 && cols > 0);
this->rows = rows;
this->cols = cols;
}
CuDenseMatrix<T> operator~();
__device__ inline
T& at(int r, int c) const {
r %= rows; // broad-casting
c %= cols;
return data[r * cols + c];
}
__device__ inline
T& at(int i) const {
return data[i];
}
__device__ __host__ inline
int nrow() const {
return rows;
}
__device__ __host__ inline
int ncol() const {
return cols;
}
__device__ __host__ inline
int getNnz() const {
return rows * cols;
}
void print(int rows = 10, int cols = 10);
};
template <typename T>
T sum(const CuDenseMatrix<T>& m) {
thrust::device_ptr<T> devicePtr(m.data);
return thrust::reduce(devicePtr, devicePtr + m.getNnz());
}
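// [Editor's sketch, not in the original header] A tiny example of composing the
// helpers above; the name mean is ours, added only to show how sum() and
// getNnz() combine for a device-resident matrix.
template <typename T>
T mean(const CuDenseMatrix<T>& m) {
return sum(m) / static_cast<T>(m.getNnz());
}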
#endif //NLP_CUDA_CUMATRIX_HEADER_H
|
b28aaee3f78c0fa608d16f4c968f447c43eaeab7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Initialize array values on the host.
*/
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* Double elements in parallel on the GPU.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
/*
* Check all elements have been doubled on the host.
*/
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 100;
int *a;
size_t size = N * sizeof(int);
/*
* Refactor this memory allocation to provide a pointer
* `a` that can be used on both the host and the device.
*/
hipMallocManaged(&a,size);
init(a, N);
size_t threads_per_block = 10;
size_t number_of_blocks = 10;
/*
* This launch will not work until the pointer `a` is also
* available to the device.
*/
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
/*
* Refactor to free memory that has been allocated to be
* accessed by both the host and the device.
*/
hipFree(a);
}
|
b28aaee3f78c0fa608d16f4c968f447c43eaeab7.cu
|
#include <stdio.h>
/*
* Initialize array values on the host.
*/
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* Double elements in parallel on the GPU.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
/*
* Check all elements have been doubled on the host.
*/
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 100;
int *a;
size_t size = N * sizeof(int);
/*
* Refactor this memory allocation to provide a pointer
* `a` that can be used on both the host and the device.
*/
cudaMallocManaged(&a,size);
init(a, N);
size_t threads_per_block = 10;
size_t number_of_blocks = 10;
/*
* This launch will not work until the pointer `a` is also
* available to the device.
*/
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
/*
* Refactor to free memory that has been allocated to be
* accessed by both the host and the device.
*/
cudaFree(a);
}
|
2272acdf670c0a021a65a7329802040345d39a95.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cutil.h"
#include "filter.h"
#include "timer.h"
#include "effects.h"
#include "symbol.h"
#include "recfilter.h"
#include "image_util.h"
#include "blue_noise.h"
#include "cubic_sampler.h"
#include "box_sampler.h"
#include "bspline3.h"
#include "mitchell_netravali.h"
#include "sacht_nehab3.h"
#if CUDA_SM < 20
# include "cuPrintf.cu"
# if __CUDA_ARCH__
# define printf cuPrintf
# endif
#endif
#define USE_LAUNCH_BOUNDS 1
const int BW_F1 = 32, // cuda block width
BH_F1 = 8;
const int BW_F2 = 32,
BH_F2 = 8;
#if USE_LAUNCH_BOUNDS
const int
#if SAMPDIM == 8 && CUDA_SM >= 20
NB_F1 = 2, // number of blocks resident per SM
#else
NB_F1 = 1, // number of blocks resident per SM
#endif
NB_F2 = 4;
#endif
__constant__ float2 c_blue_noise[SAMPDIM];
__constant__ float c_prefilter_data[SAMPDIM*KS*KS];
texture<float, 2, hipReadModeElementType> t_aux_float;
struct texfetch_aux_float
{
typedef float result_type;
__device__ float operator()(float x, float y)
{
return tex2D(t_aux_float, x, y);
}
};
__constant__ filter_operation filter_op;
// do the actual value processing according to what's in 'filter_op'
template <effect_type OP, class S>
__device__ typename S::result_type do_filter(const S &sampler, float2 pos)
{
typename S::template rebind_sampler<texfetch_aux_float>::type sampler_aux_float;
typedef typename S::result_type result_type;
switch(OP)
{
case EFFECT_POSTERIZE:
return posterize(sampler(pos), filter_op.levels);
case EFFECT_SCALE:
return scale(sampler(pos),filter_op.scale);
case EFFECT_BIAS:
return bias(sampler(pos),filter_op.bias);
case EFFECT_ROOT:
return root(sampler(pos),filter_op.degree);
case EFFECT_THRESHOLD:
return threshold(sampler(pos),filter_op.minimum, filter_op.maximum);
case EFFECT_REPLACEMENT:
return replacement(sampler(pos),
filter_op.old_color,
filter_op.new_color,
filter_op.tau);
case EFFECT_GRADIENT_EDGE_DETECTION:
return gradient_edge_detection(sampler(pos,1,0),sampler(pos,0,1));
case EFFECT_LAPLACIAN:
return laplacian(sampler(pos,2,0),sampler(pos,0,2));
case EFFECT_LAPLACE_EDGE_ENHANCEMENT:
return laplace_edge_enhancement(sampler(pos),
sampler(pos,2,0),sampler(pos,0,2),
filter_op.multiple);
case EFFECT_YAROSLAVSKY_BILATERAL:
return yaroslavsky_bilateral(sampler(pos),
sampler(pos,1,0), sampler(pos,0,1),
sampler(pos,1,1),
sampler(pos,2,0),sampler(pos,0,2),
filter_op.rho, filter_op.h);
case EFFECT_BRIGHTNESS_CONTRAST:
return brightness_contrast(sampler(pos),filter_op.brightness,
filter_op.contrast);
case EFFECT_HUE_SATURATION_LIGHTNESS:
return hue_saturation_lightness(sampler(pos),filter_op.hue,
filter_op.saturation,filter_op.lightness);
case EFFECT_UNSHARP_MASK:
return unsharp_mask(sampler(pos),sampler_aux_float(pos),
filter_op.amount,filter_op.threshold);
case EFFECT_EMBOSS:
{
result_type i0 = sampler(pos),
i1 = sampler(pos+make_float2(-filter_op.offset,
filter_op.offset));
return saturate(filter_op.amount*(i0-i1)+.5f);
}
case EFFECT_BILATERAL:
{
const float scale = 3*filter_op.sigma_s;
// we're using sigma_r*3 to compensate for something I don't understand
const float inv_2sigma_r2 = 1.0f/(2*filter_op.sigma_r*filter_op.sigma_r),
inv_2sigma_s2 = 1.0f/(2*filter_op.sigma_s*filter_op.sigma_s);
const result_type center = sampler(pos);
result_type sum_weight = pixel_traits<result_type>::make_pixel(0),
sum_color = pixel_traits<result_type>::make_pixel(0);
const float space = 1.0f/8;
for(int i=0; i<8; ++i)
{
for(int j=0; j<8; ++j)
{
// d = [-0.5+1/16;0.5-1/16]
float2 d = make_float2(j+0.5f, i+0.5f)*space - 0.5f;
d *= scale;
float weight_s = expf(-(d.x*d.x + d.y*d.y)*inv_2sigma_s2);
result_type c = sampler(pos+d),
dc = center-c,
weight = expf(dc*dc*-inv_2sigma_r2)*weight_s;
sum_color += c*weight;
sum_weight += weight;
}
}
return sum_color / sum_weight;
}
case EFFECT_IDENTITY:
default:
return sampler(pos);
}
}
template <int C>
struct filter_traits {};
template <int C>
struct sum_traits
: pixel_traits<float,C+1>
{
typedef typename pixel_traits<float,C+1>::pixel_type type;
};
struct filter_plan
{
filter_plan()
: a_in(NULL)
, a_aux_float(NULL)
, prefilter_recfilter_plan(NULL)
, gaussblur_plan(NULL)
{
}
virtual ~filter_plan()
{
free(prefilter_recfilter_plan);
if(a_in)
hipFreeArray(a_in);
if(a_aux_float)
hipFreeArray(a_aux_float);
free(gaussblur_plan);
}
int flags;
hipArray *a_in, *a_aux_float;
dimage<float> img_aux_float_orig,
img_aux_float;
filter_operation op;
recfilter5_plan *prefilter_recfilter_plan;
gaussian_blur_plan *gaussblur_plan;
};
template <int C>
struct filter_plan_C : filter_plan
{
dimage<typename sum_traits<C>::type,KS*KS> temp_image;
};
template<int C>
void copy_to_array(hipArray *out, dimage_ptr<const float,C> in);
void init_pre_filter(float (*prefilter)(float))
{
std::vector<float2> blue_noise;
std::vector<float> prefilter_data;
blue_noise.reserve(SAMPDIM);
prefilter_data.reserve(SAMPDIM*KS*KS);
for(int i=0; i<SAMPDIM; ++i)
{
float2 n = make_float2(blue_noise_x[i], blue_noise_y[i]);
blue_noise.push_back(n);
for(int y=0; y<KS; ++y)
{
for(int x=0; x<KS; ++x)
{
prefilter_data.push_back(prefilter(x+n.x-1.5)*
prefilter(y+n.y-1.5)/SAMPDIM);
}
}
}
gpu::copy_to_symbol(c_blue_noise,blue_noise);
gpu::copy_to_symbol(c_prefilter_data,prefilter_data);
}
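// [Editor's note] For each of the SAMPDIM blue-noise jitter positions, the loop
// above tabulates the separable prefilter weight prefilter(dx)*prefilter(dy)
// for every cell of the KS x KS kernel support, pre-divided by SAMPDIM so the
// first-pass kernels can simply accumulate over samples and obtain an average;
// the tables are uploaded to the constant arrays c_blue_noise and
// c_prefilter_data.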
template<int C>
filter_plan *
filter_create_plan(dimage_ptr<const float,C> img, const filter_operation &op,/*{{{*/
int flags)
{
assert(!img.empty());
typedef filter_traits<C> cfg;
typedef typename pixel_traits<float,C>::texel_type texel_type;
typedef typename sum_traits<C>::type sum_type;
filter_plan_C<C> *plan = new filter_plan_C<C>;
plan->flags = flags;
plan->op = op;
int imgsize = img.width()*img.height();
Vector<float,1+1> weights;
// calculate cubic b-spline weights
float a = 2.f-std::sqrt(3.0f);
weights[0] = 1+a;
weights[1] = a;
base_timer *timer = NULL;
// copy the input data to a texture
hipChannelFormatDesc ccd = hipCreateChannelDesc<texel_type>();
hipMallocArray(&plan->a_in, &ccd, img.width(),img.height());
dimage<float,C> preproc_img;
if(op.pre_filter == FILTER_CARDINAL_BSPLINE3)
{
if(flags & VERBOSE)
timer = &timers.gpu_add("create prefilter plan");
plan->prefilter_recfilter_plan =
recfilter5_create_plan<1>(img.width(),img.height(),img.rowstride(),
weights);
if(timer)
timer->stop();
}
if(op.post_filter == FILTER_CARDINAL_BSPLINE3)
{
preproc_img.resize(img.width(), img.height());
recfilter5_plan *postfilter_plan;
if(op.pre_filter == op.post_filter)
postfilter_plan = plan->prefilter_recfilter_plan;
else
{
if(flags & VERBOSE)
timer = &timers.gpu_add("create postfilter plan");
postfilter_plan = recfilter5_create_plan<1>(img.width(),
img.height(),
img.rowstride(),
weights);
if(timer)
timer->stop();
}
try
{
if(flags & VERBOSE)
timer = &timers.gpu_add("Convolve with bspline3^-1",
img.width()*img.height(), "P");
// convolve with a bspline3^-1 to make a cardinal post-filter
for(int i=0; i<C; ++i)
recfilter5(postfilter_plan, preproc_img[i], img[i]);
if(timer)
timer->stop();
if(flags & VERBOSE)
timer = &timers.gpu_add("copy image to texture",imgsize, "P");
copy_to_array(plan->a_in, dimage_ptr<const float,C>(&preproc_img));
if(timer)
timer->stop();
if(postfilter_plan != plan->prefilter_recfilter_plan)
free(postfilter_plan);
}
catch(...)
{
if(postfilter_plan != plan->prefilter_recfilter_plan)
free(postfilter_plan);
throw;
}
}
else if(op.post_filter == FILTER_SACHT_NEHAB3)
{
Vector<float,2+1> weights_SN3;
weights_SN3[0] = 1.46338646f;
weights_SN3[1] = 0.45884159f;
weights_SN3[2] = 0.00454486f;
preproc_img.resize(img.width(), img.height());
recfilter5_plan *postfilter_plan;
if(op.pre_filter == op.post_filter)
postfilter_plan = plan->prefilter_recfilter_plan;
else
{
if(flags & VERBOSE)
timer = &timers.gpu_add("create postfilter plan");
postfilter_plan = recfilter5_create_plan<2>(img.width(),
img.height(),
img.rowstride(),
weights_SN3);
if(timer)
timer->stop();
}
try
{
if(flags & VERBOSE)
timer = &timers.gpu_add("Convolve with bspline3^-1",
img.width()*img.height(), "P");
// convolve with a bspline3^-1 to make a cardinal post-filter
for(int i=0; i<C; ++i)
recfilter5(postfilter_plan, preproc_img[i], img[i]);
if(timer)
timer->stop();
if(flags & VERBOSE)
timer = &timers.gpu_add("copy image to texture",imgsize, "P");
copy_to_array(plan->a_in, dimage_ptr<const float,C>(&preproc_img));
if(timer)
timer->stop();
if(postfilter_plan != plan->prefilter_recfilter_plan)
free(postfilter_plan);
}
catch(...)
{
if(postfilter_plan != plan->prefilter_recfilter_plan)
free(postfilter_plan);
throw;
}
}
else
{
copy_to_array(plan->a_in, img);
preproc_img = img;
}
cfg::tex().normalized = false;
cfg::tex().filterMode = hipFilterModeLinear;
cfg::tex().addressMode[0] = cfg::tex().addressMode[1] = hipAddressModeClamp;
gpu::copy_to_symbol(filter_op,op);
plan->temp_image.resize(img.width(), img.height());
if(flags & VERBOSE)
timer = &timers.gpu_add("initialize prefilter");
switch(op.pre_filter)
{
case FILTER_BSPLINE3:
case FILTER_CARDINAL_BSPLINE3:
init_pre_filter(&bspline3);
break;
case FILTER_MITCHELL_NETRAVALI:
init_pre_filter(&mitchell_netravali);
break;
}
if(timer)
timer->stop();
switch(op.type)
{
case EFFECT_UNSHARP_MASK:
ccd = hipCreateChannelDesc<float>();
hipMallocArray(&plan->a_aux_float, &ccd, img.width(), img.height());
check_cuda_error("hipMallocArray");
plan->img_aux_float_orig.resize(img.width(), img.height());
luminance(&plan->img_aux_float_orig, &preproc_img);
plan->gaussblur_plan
= gaussian_blur_create_plan(img.width(), img.height(),
img.rowstride(), op.sigma);
plan->img_aux_float.resize(img.width(), img.height());
t_aux_float.normalized = false;
t_aux_float.filterMode = hipFilterModeLinear;
t_aux_float.addressMode[0] = t_aux_float.addressMode[1] = hipAddressModeClamp;
break;
}
return plan;
}/*}}}*/
void free(filter_plan *plan)/*{{{*/
{
delete plan;
}/*}}}*/
template <class S, effect_type OP,int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F1*BH_F1, NB_F1)
#endif
void filter_kernel_ss1(dimage_ptr<typename sum_traits<C>::type,KS*KS> out)/*{{{*/
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F1+tx, y = blockIdx.y*BH_F1+ty;
if(!out.is_inside(x,y))
return;
// output will point to the pixel we're processing now
int idx = out.offset_at(x,y);
out += idx;
// we're using some smem as registers not to blow up the register space,
// here we define how much 'registers' are in smem, the rest is used
// in regular registers
typedef filter_traits<C> cfg;
typedef typename sum_traits<C>::type sum_type;
typedef typename pixel_traits<float,C>::pixel_type pixel_type;
const int SMEM_SIZE = cfg::smem_size,
REG_SIZE = KS*KS-SMEM_SIZE;
__shared__ sum_type _sum[BH_F1][SMEM_SIZE][BW_F1];
sum_type (*ssum)[BW_F1] = (sum_type (*)[BW_F1]) &_sum[ty][0][tx];
sum_type sum[REG_SIZE];
// Init registers to zero
for(int i=0; i<REG_SIZE; ++i)
sum[i] = sum_traits<C>::make_pixel(0);
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
*ssum[i] = sum_traits<C>::make_pixel(0);
// top-left position of the kernel support
float2 p = make_float2(x,y)-1.5f+0.5f;
float *bspline3 = c_prefilter_data;
S sampler;
for(int s=0; s<SAMPDIM; ++s)
{
#if SAMPDIM==1
pixel_type value = do_filter<OP>(sampler, p);
#else
pixel_type value = do_filter<OP>(sampler, p+c_blue_noise[s]);
#endif
value = srgb2lrgb(value);
// scans through the kernel support, collecting data for each position
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
{
float wij = bspline3[i];
*ssum[i] += sum_traits<C>::make_pixel(value*wij, wij);
}
bspline3 += SMEM_SIZE;
#pragma unroll
for(int i=0; i<REG_SIZE; ++i)
{
float wij = bspline3[i];
sum[i] += sum_traits<C>::make_pixel(value*wij, wij);
}
bspline3 += REG_SIZE;
}
// writes out to gmem what's in the registers
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
*out[i] = *ssum[i];
#pragma unroll
for(int i=0; i<REG_SIZE; ++i)
*out[SMEM_SIZE+i] = sum[i];
}/*}}}*/
template <class S, effect_type OP,int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F1*BH_F1, NB_F1)
#endif
void filter_kernel1(dimage_ptr<typename sum_traits<C>::type,KS*KS> out)/*{{{*/
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F1+tx, y = blockIdx.y*BH_F1+ty;
if(!out.is_inside(x,y))
return;
// output will point to the pixel we're processing now
int idx = out.offset_at(x,y);
out += idx;
// we're using some smem as registers not to blow up the register space,
// here we define how much 'registers' are in smem, the rest is used
// in regular registers
typedef filter_traits<C> cfg;
typedef typename sum_traits<C>::type sum_type;
typedef typename pixel_traits<float,C>::pixel_type pixel_type;
const int SMEM_SIZE = cfg::smem_size,
REG_SIZE = KS*KS-SMEM_SIZE;
__shared__ sum_type _sum[BH_F1][SMEM_SIZE][BW_F1];
sum_type (*ssum)[BW_F1] = (sum_type (*)[BW_F1]) &_sum[ty][0][tx];
sum_type sum[REG_SIZE];
// Init registers to zero
for(int i=0; i<REG_SIZE; ++i)
sum[i] = sum_traits<C>::make_pixel(0);
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
*ssum[i] = sum_traits<C>::make_pixel(0);
// top-left position of the kernel support
float2 p = make_float2(x,y)-1.5f+0.5f;
float *bspline3 = c_prefilter_data;
S sampler;
pixel_type value = do_filter<OP>(sampler, p);
value = srgb2lrgb(value);
// scans through the kernel support, collecting data for each position
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
{
float wij = bspline3[i];
*ssum[i] += sum_traits<C>::make_pixel(value*wij, wij);
}
bspline3 += SMEM_SIZE;
#pragma unroll
for(int i=0; i<REG_SIZE; ++i)
{
float wij = bspline3[i];
sum[i] += sum_traits<C>::make_pixel(value*wij, wij);
}
// writes out to gmem what's in the registers
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
*out[i] = *ssum[i];
#pragma unroll
for(int i=0; i<REG_SIZE; ++i)
*out[SMEM_SIZE+i] = sum[i];
}/*}}}*/
template <class S, effect_type OP,int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F1*BH_F1, NB_F1)
#endif
void filter_kernel_box_ss(dimage_ptr<float,C> out)/*{{{*/
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F1+tx, y = blockIdx.y*BH_F1+ty;
if(!out.is_inside(x,y))
return;
// output will point to the pixel we're processing now
int idx = out.offset_at(x,y);
out += idx;
// we're using some smem as registers not to blow up the register space,
// here we define how much 'registers' are in smem, the rest is used
// in regular registers
typedef filter_traits<C> cfg;
typedef typename sum_traits<C>::type sum_type;
typedef typename pixel_traits<float,C>::pixel_type pixel_type;
pixel_type sum = pixel_traits<float,C>::make_pixel(0);
// top-left position of the kernel support
float2 p = make_float2(x,y)+0.5f;
S sampler;
for(int s=0; s<SAMPDIM; ++s)
{
#if SAMPDIM == 1
pixel_type value = do_filter<OP>(sampler, p);
#else
pixel_type value = do_filter<OP>(sampler, p+c_blue_noise[s]);
#endif
sum += srgb2lrgb(value);
}
*out = sum/float(SAMPDIM);
}/*}}}*/
template <class S, effect_type OP,int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F1*BH_F1, NB_F1)
#endif
void filter_kernel_box(dimage_ptr<float,C> out)/*{{{*/
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F1+tx, y = blockIdx.y*BH_F1+ty;
if(!out.is_inside(x,y))
return;
// output will point to the pixel we're processing now
int idx = out.offset_at(x,y);
out += idx;
// we're using some smem as registers not to blow up the register space,
// here we define how much 'registers' are in smem, the rest is used
// in regular registers
typedef filter_traits<C> cfg;
typedef typename sum_traits<C>::type sum_type;
typedef typename pixel_traits<float,C>::pixel_type pixel_type;
// top-left position of the kernel support
float2 p = make_float2(x,y)+0.5f;
S sampler;
*out = srgb2lrgb(do_filter<OP>(sampler, p));
}/*}}}*/
template <int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F2*BH_F2, NB_F2)
#endif
void filter_kernel2(dimage_ptr<float,C> out, /*{{{*/
dimage_ptr<const typename sum_traits<C>::type,KS*KS> in)
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F2+tx, y = blockIdx.y*BH_F2+ty;
// out of bounds? goodbye
if(!in.is_inside(x,y))
return;
// in and out points to the input/output pixel we're processing
int idx = in.offset_at(x,y);
in += idx;
out += idx;
// treat corner cases where the support is outside the image
int mi = min(y+KS,in.height())-y,
mj = min(x+KS,in.width())-x;
// sum the contribution of nearby pixels
typename sum_traits<C>::type sum = sum_traits<C>::make_pixel(0);
#pragma unroll
for(int i=0; i<mi; ++i)
{
#pragma unroll
for(int j=0; j<mj; ++j)
{
sum += *in[i*KS+j];
++in;
}
in += in.rowstride()-mj;
}
*out = filter_traits<C>::normalize_sum(sum);
}/*}}}*/
template <class POST_FILTER, int C>
void filter(filter_plan *_plan, dimage_ptr<float,C> out, const filter_operation &op)/*{{{*/
{
filter_plan_C<C> *plan = dynamic_cast<filter_plan_C<C> *>(_plan);
assert(plan != NULL);
if(plan->op.post_filter != op.post_filter)
throw std::runtime_error("Postfilter changed, plan must be recreated");
gpu::copy_to_symbol(filter_op,op);
typedef filter_traits<C> cfg;
assert(plan->temp_image.width() == out.width() &&
plan->temp_image.height() == out.height());
hipBindTextureToArray(cfg::tex(), plan->a_in);
dim3 bdim(BW_F1,BH_F1),
gdim((out.width()+bdim.x-1)/bdim.x, (out.height()+bdim.y-1)/bdim.y);
typedef filter_traits<C> cfg;
base_timer *timer = NULL;
#define CASE(EFFECT) \
case EFFECT:\
if(plan->flags & VERBOSE)\
timer = &timers.gpu_add("First pass",out.width()*out.height(),"P");\
if(op.pre_filter == FILTER_BOX) \
{ \
if(op.use_supersampling) \
hipLaunchKernelGGL(( filter_kernel_box_ss<POST_FILTER,EFFECT>), dim3(gdim), dim3(bdim), 0, 0, out); \
else \
hipLaunchKernelGGL(( filter_kernel_box<POST_FILTER,EFFECT>), dim3(gdim), dim3(bdim), 0, 0, out); \
} \
else \
{ \
if(op.use_supersampling) \
hipLaunchKernelGGL(( filter_kernel_ss1<POST_FILTER,EFFECT,C>), dim3(gdim), dim3(bdim), 0, 0, &plan->temp_image); \
else \
hipLaunchKernelGGL(( filter_kernel1<POST_FILTER,EFFECT,C>), dim3(gdim), dim3(bdim), 0, 0, &plan->temp_image); \
}\
if(timer)\
timer->stop();\
break
switch(op.type)
{
CASE(EFFECT_IDENTITY);
CASE(EFFECT_POSTERIZE);
CASE(EFFECT_SCALE);
CASE(EFFECT_BIAS);
CASE(EFFECT_ROOT);
CASE(EFFECT_THRESHOLD);
CASE(EFFECT_REPLACEMENT);
CASE(EFFECT_GRADIENT_EDGE_DETECTION);
CASE(EFFECT_LAPLACIAN);
CASE(EFFECT_LAPLACE_EDGE_ENHANCEMENT);
CASE(EFFECT_YAROSLAVSKY_BILATERAL);
CASE(EFFECT_BRIGHTNESS_CONTRAST);
CASE(EFFECT_HUE_SATURATION_LIGHTNESS);
CASE(EFFECT_BILATERAL);
CASE(EFFECT_EMBOSS);
case EFFECT_UNSHARP_MASK:
assert(plan->a_aux_float != NULL);
if(plan->flags & VERBOSE)
timer = &timers.gpu_add("Gaussian blur on Y channel",out.width()*out.height(),"P");
update_plan(plan->gaussblur_plan,
plan->img_aux_float_orig.width(),
plan->img_aux_float_orig.height(),
plan->img_aux_float_orig.rowstride(),
op.sigma);
gaussian_blur(plan->gaussblur_plan, &plan->img_aux_float,
&plan->img_aux_float_orig);
if(timer)
timer->stop();
copy_to_array(plan->a_aux_float, dimage_ptr<const float>(&plan->img_aux_float));
hipBindTextureToArray(t_aux_float, plan->a_aux_float);
if(plan->flags & VERBOSE)
timer = &timers.gpu_add("First pass",out.width()*out.height(),"P");
if(op.pre_filter == FILTER_BOX)
{
if(op.use_supersampling)
hipLaunchKernelGGL(( filter_kernel_box_ss<POST_FILTER,EFFECT_UNSHARP_MASK>), dim3(gdim), dim3(bdim), 0, 0, out);
else
hipLaunchKernelGGL(( filter_kernel_box<POST_FILTER,EFFECT_UNSHARP_MASK>), dim3(gdim), dim3(bdim), 0, 0, out);
}
else
{
if(op.use_supersampling)
hipLaunchKernelGGL(( filter_kernel_ss1<POST_FILTER,EFFECT_UNSHARP_MASK,C>), dim3(gdim), dim3(bdim), 0, 0, &plan->temp_image);
else
hipLaunchKernelGGL(( filter_kernel1<POST_FILTER,EFFECT_UNSHARP_MASK,C>), dim3(gdim), dim3(bdim), 0, 0, &plan->temp_image);
}
if(timer)
timer->stop();
hipUnbindTexture(t_aux_float);
break;
default:
assert(false);
}
#undef CASE
if(op.pre_filter != FILTER_BOX)
{
if(plan->flags & VERBOSE)
timer = &timers.gpu_add("Second pass",out.width()*out.height(),"P");
dim3 bdim(BW_F2,BH_F2),
gdim((out.width()+bdim.x-1)/bdim.x,(out.height()+bdim.y-1)/bdim.y);
hipLaunchKernelGGL(( filter_kernel2<C>), dim3(gdim), dim3(bdim), 0, 0, out, &plan->temp_image);
if(timer)
timer->stop();
}
hipUnbindTexture(cfg::tex());
if(op.pre_filter == FILTER_CARDINAL_BSPLINE3)
{
if(plan->flags & VERBOSE)
timer = &timers.gpu_add("Convolve with bspline3^-1",out.width()*out.height(),"P");
// convolve with a bspline3^-1 to make a cardinal pre-filter
for(int i=0; i<C; ++i)
recfilter5(plan->prefilter_recfilter_plan, out[i]);
if(timer)
timer->stop();
}
// maps back to gamma space
lrgb2srgb(out, out);
}/*}}}*/
template <int C>
void filter(filter_plan *plan, dimage_ptr<float,C> out, const filter_operation &op)/*{{{*/
{
typedef filter_traits<C> cfg;
typedef typename cfg::texfetch_type texfetch;
switch(op.post_filter)
{
case FILTER_BSPLINE3:
case FILTER_CARDINAL_BSPLINE3:
filter<cubic_sampler<bspline3_weights, texfetch>,C>(plan, out, op);
break;
case FILTER_MITCHELL_NETRAVALI:
filter<cubic_sampler<mitchell_netravali_weights,texfetch>, C>(plan, out, op);
break;
case FILTER_BOX:
filter<box_sampler<texfetch>, C>(plan, out, op);
break;
case FILTER_SACHT_NEHAB3:
filter<cubic_sampler<sacht_nehab3_weights, texfetch>,C>(plan, out, op);
break;
default:
assert(false);
}
}
// Grayscale filtering ===================================================/*{{{*/
texture<float, 2, hipReadModeElementType> t_in_gray;
struct texfetch_gray
{
typedef float result_type;
__device__ float operator()(float x, float y)
{
return tex2D(t_in_gray, x, y);
}
};
template <>
struct filter_traits<1>
{
typedef texfetch_gray texfetch_type;
static const int smem_size = 3;
static
texture<float,2,hipReadModeElementType> &tex() { return t_in_gray; }
__device__ static float normalize_sum(float2 sum)
{
return sum.x / sum.y;
}
};
template<>
void copy_to_array(hipArray *out, dimage_ptr<const float> in)
{
hipMemcpy2DToArray(out, 0, 0, in,
in.rowstride()*sizeof(float),
in.width()*sizeof(float), in.height(),
hipMemcpyDeviceToDevice);
}
template
void filter(filter_plan *, dimage_ptr<float,1> img, const filter_operation &op);
template
filter_plan *
filter_create_plan(dimage_ptr<const float,1> img, const filter_operation &op,
int flags);
/*}}}*/
//{{{ RGB filtering =========================================================
texture<float4, 2, hipReadModeElementType> t_in_rgba;
struct texfetch_rgba
{
typedef float3 result_type;
__device__ float3 operator()(float x, float y)
{
return make_float3(tex2D(t_in_rgba, x, y));
}
};
template <>
struct filter_traits<3>
{
typedef texfetch_rgba texfetch_type;
#if CUDA_SM >= 20
static const int smem_size = 5;
#else
static const int smem_size = 3;
#endif
static int flags;
static texture<float4,2,hipReadModeElementType> &tex()
{ return t_in_rgba; }
__device__ static float3 normalize_sum(float4 sum)
{
return make_float3(sum) / sum.w;
}
};
template <>
void copy_to_array(hipArray *out, dimage_ptr<const float,3> img)
{
dimage<float3> temp;
temp.resize(img.width(), img.height());
convert(&temp, img);
hipMemcpy2DToArray(out, 0, 0, temp,
temp.rowstride()*sizeof(float4),
temp.width()*sizeof(float4), temp.height(),
hipMemcpyDeviceToDevice);
}
template
void filter(filter_plan *, dimage_ptr<float,3> img, const filter_operation &op);
template
filter_plan *
filter_create_plan(dimage_ptr<const float,3> img, const filter_operation &op,
int flags);
/*}}}*/
|
2272acdf670c0a021a65a7329802040345d39a95.cu
|
#include "cutil.h"
#include "filter.h"
#include "timer.h"
#include "effects.h"
#include "symbol.h"
#include "recfilter.h"
#include "image_util.h"
#include "blue_noise.h"
#include "cubic_sampler.h"
#include "box_sampler.h"
#include "bspline3.h"
#include "mitchell_netravali.h"
#include "sacht_nehab3.h"
#if CUDA_SM < 20
# include "cuPrintf.cu"
# if __CUDA_ARCH__
# define printf cuPrintf
# endif
#endif
#define USE_LAUNCH_BOUNDS 1
const int BW_F1 = 32, // cuda block width
BH_F1 = 8;
const int BW_F2 = 32,
BH_F2 = 8;
#if USE_LAUNCH_BOUNDS
const int
#if SAMPDIM == 8 && CUDA_SM >= 20
NB_F1 = 2, // number of blocks resident per SM
#else
NB_F1 = 1, // number of blocks resident per SM
#endif
NB_F2 = 4;
#endif
__constant__ float2 c_blue_noise[SAMPDIM];
__constant__ float c_prefilter_data[SAMPDIM*KS*KS];
texture<float, 2, cudaReadModeElementType> t_aux_float;
struct texfetch_aux_float
{
typedef float result_type;
__device__ float operator()(float x, float y)
{
return tex2D(t_aux_float, x, y);
}
};
__constant__ filter_operation filter_op;
// do the actual value processing according to what's in 'filter_op'
template <effect_type OP, class S>
__device__ typename S::result_type do_filter(const S &sampler, float2 pos)
{
typename S::template rebind_sampler<texfetch_aux_float>::type sampler_aux_float;
typedef typename S::result_type result_type;
switch(OP)
{
case EFFECT_POSTERIZE:
return posterize(sampler(pos), filter_op.levels);
case EFFECT_SCALE:
return scale(sampler(pos),filter_op.scale);
case EFFECT_BIAS:
return bias(sampler(pos),filter_op.bias);
case EFFECT_ROOT:
return root(sampler(pos),filter_op.degree);
case EFFECT_THRESHOLD:
return threshold(sampler(pos),filter_op.minimum, filter_op.maximum);
case EFFECT_REPLACEMENT:
return replacement(sampler(pos),
filter_op.old_color,
filter_op.new_color,
filter_op.tau);
case EFFECT_GRADIENT_EDGE_DETECTION:
return gradient_edge_detection(sampler(pos,1,0),sampler(pos,0,1));
case EFFECT_LAPLACIAN:
return laplacian(sampler(pos,2,0),sampler(pos,0,2));
case EFFECT_LAPLACE_EDGE_ENHANCEMENT:
return laplace_edge_enhancement(sampler(pos),
sampler(pos,2,0),sampler(pos,0,2),
filter_op.multiple);
case EFFECT_YAROSLAVSKY_BILATERAL:
return yaroslavsky_bilateral(sampler(pos),
sampler(pos,1,0), sampler(pos,0,1),
sampler(pos,1,1),
sampler(pos,2,0),sampler(pos,0,2),
filter_op.rho, filter_op.h);
case EFFECT_BRIGHTNESS_CONTRAST:
return brightness_contrast(sampler(pos),filter_op.brightness,
filter_op.contrast);
case EFFECT_HUE_SATURATION_LIGHTNESS:
return hue_saturation_lightness(sampler(pos),filter_op.hue,
filter_op.saturation,filter_op.lightness);
case EFFECT_UNSHARP_MASK:
return unsharp_mask(sampler(pos),sampler_aux_float(pos),
filter_op.amount,filter_op.threshold);
case EFFECT_EMBOSS:
{
result_type i0 = sampler(pos),
i1 = sampler(pos+make_float2(-filter_op.offset,
filter_op.offset));
return saturate(filter_op.amount*(i0-i1)+.5f);
}
case EFFECT_BILATERAL:
{
const float scale = 3*filter_op.sigma_s;
// we're using sigma_r*3 to compensate for something I don't understand
const float inv_2sigma_r2 = 1.0f/(2*filter_op.sigma_r*filter_op.sigma_r),
inv_2sigma_s2 = 1.0f/(2*filter_op.sigma_s*filter_op.sigma_s);
const result_type center = sampler(pos);
result_type sum_weight = pixel_traits<result_type>::make_pixel(0),
sum_color = pixel_traits<result_type>::make_pixel(0);
const float space = 1.0f/8;
for(int i=0; i<8; ++i)
{
for(int j=0; j<8; ++j)
{
// d = [-0.5+1/16;0.5-1/16]
float2 d = make_float2(j+0.5f, i+0.5f)*space - 0.5f;
d *= scale;
float weight_s = expf(-(d.x*d.x + d.y*d.y)*inv_2sigma_s2);
result_type c = sampler(pos+d),
dc = center-c,
weight = expf(dc*dc*-inv_2sigma_r2)*weight_s;
sum_color += c*weight;
sum_weight += weight;
}
}
return sum_color / sum_weight;
}
case EFFECT_IDENTITY:
default:
return sampler(pos);
}
}
template <int C>
struct filter_traits {};
template <int C>
struct sum_traits
: pixel_traits<float,C+1>
{
typedef typename pixel_traits<float,C+1>::pixel_type type;
};
struct filter_plan
{
filter_plan()
: a_in(NULL)
, a_aux_float(NULL)
, prefilter_recfilter_plan(NULL)
, gaussblur_plan(NULL)
{
}
virtual ~filter_plan()
{
free(prefilter_recfilter_plan);
if(a_in)
cudaFreeArray(a_in);
if(a_aux_float)
cudaFreeArray(a_aux_float);
free(gaussblur_plan);
}
int flags;
cudaArray *a_in, *a_aux_float;
dimage<float> img_aux_float_orig,
img_aux_float;
filter_operation op;
recfilter5_plan *prefilter_recfilter_plan;
gaussian_blur_plan *gaussblur_plan;
};
template <int C>
struct filter_plan_C : filter_plan
{
dimage<typename sum_traits<C>::type,KS*KS> temp_image;
};
template<int C>
void copy_to_array(cudaArray *out, dimage_ptr<const float,C> in);
void init_pre_filter(float (*prefilter)(float))
{
std::vector<float2> blue_noise;
std::vector<float> prefilter_data;
blue_noise.reserve(SAMPDIM);
prefilter_data.reserve(SAMPDIM*KS*KS);
for(int i=0; i<SAMPDIM; ++i)
{
float2 n = make_float2(blue_noise_x[i], blue_noise_y[i]);
blue_noise.push_back(n);
for(int y=0; y<KS; ++y)
{
for(int x=0; x<KS; ++x)
{
prefilter_data.push_back(prefilter(x+n.x-1.5)*
prefilter(y+n.y-1.5)/SAMPDIM);
}
}
}
gpu::copy_to_symbol(c_blue_noise,blue_noise);
gpu::copy_to_symbol(c_prefilter_data,prefilter_data);
}
template<int C>
filter_plan *
filter_create_plan(dimage_ptr<const float,C> img, const filter_operation &op,/*{{{*/
int flags)
{
assert(!img.empty());
typedef filter_traits<C> cfg;
typedef typename pixel_traits<float,C>::texel_type texel_type;
typedef typename sum_traits<C>::type sum_type;
filter_plan_C<C> *plan = new filter_plan_C<C>;
plan->flags = flags;
plan->op = op;
int imgsize = img.width()*img.height();
Vector<float,1+1> weights;
// calculate cubic b-spline weights
float a = 2.f-std::sqrt(3.0f);
weights[0] = 1+a;
weights[1] = a;
base_timer *timer = NULL;
// copy the input data to a texture
cudaChannelFormatDesc ccd = cudaCreateChannelDesc<texel_type>();
cudaMallocArray(&plan->a_in, &ccd, img.width(),img.height());
dimage<float,C> preproc_img;
if(op.pre_filter == FILTER_CARDINAL_BSPLINE3)
{
if(flags & VERBOSE)
timer = &timers.gpu_add("create prefilter plan");
plan->prefilter_recfilter_plan =
recfilter5_create_plan<1>(img.width(),img.height(),img.rowstride(),
weights);
if(timer)
timer->stop();
}
if(op.post_filter == FILTER_CARDINAL_BSPLINE3)
{
preproc_img.resize(img.width(), img.height());
recfilter5_plan *postfilter_plan;
if(op.pre_filter == op.post_filter)
postfilter_plan = plan->prefilter_recfilter_plan;
else
{
if(flags & VERBOSE)
timer = &timers.gpu_add("create postfilter plan");
postfilter_plan = recfilter5_create_plan<1>(img.width(),
img.height(),
img.rowstride(),
weights);
if(timer)
timer->stop();
}
try
{
if(flags & VERBOSE)
timer = &timers.gpu_add("Convolve with bspline3^-1",
img.width()*img.height(), "P");
// convolve with a bspline3^-1 to make a cardinal post-filter
for(int i=0; i<C; ++i)
recfilter5(postfilter_plan, preproc_img[i], img[i]);
if(timer)
timer->stop();
if(flags & VERBOSE)
timer = &timers.gpu_add("copy image to texture",imgsize, "P");
copy_to_array(plan->a_in, dimage_ptr<const float,C>(&preproc_img));
if(timer)
timer->stop();
if(postfilter_plan != plan->prefilter_recfilter_plan)
free(postfilter_plan);
}
catch(...)
{
if(postfilter_plan != plan->prefilter_recfilter_plan)
free(postfilter_plan);
throw;
}
}
else if(op.post_filter == FILTER_SACHT_NEHAB3)
{
Vector<float,2+1> weights_SN3;
weights_SN3[0] = 1.46338646f;
weights_SN3[1] = 0.45884159f;
weights_SN3[2] = 0.00454486f;
preproc_img.resize(img.width(), img.height());
recfilter5_plan *postfilter_plan;
if(op.pre_filter == op.post_filter)
postfilter_plan = plan->prefilter_recfilter_plan;
else
{
if(flags & VERBOSE)
timer = &timers.gpu_add("create postfilter plan");
postfilter_plan = recfilter5_create_plan<2>(img.width(),
img.height(),
img.rowstride(),
weights_SN3);
if(timer)
timer->stop();
}
try
{
if(flags & VERBOSE)
timer = &timers.gpu_add("Convolve with bspline3^-1",
img.width()*img.height(), "P");
// convolve with a bspline3^-1 to make a cardinal post-filter
for(int i=0; i<C; ++i)
recfilter5(postfilter_plan, preproc_img[i], img[i]);
if(timer)
timer->stop();
if(flags & VERBOSE)
timer = &timers.gpu_add("copy image to texture",imgsize, "P");
copy_to_array(plan->a_in, dimage_ptr<const float,C>(&preproc_img));
if(timer)
timer->stop();
if(postfilter_plan != plan->prefilter_recfilter_plan)
free(postfilter_plan);
}
catch(...)
{
if(postfilter_plan != plan->prefilter_recfilter_plan)
free(postfilter_plan);
throw;
}
}
else
{
copy_to_array(plan->a_in, img);
preproc_img = img;
}
cfg::tex().normalized = false;
cfg::tex().filterMode = cudaFilterModeLinear;
cfg::tex().addressMode[0] = cfg::tex().addressMode[1] = cudaAddressModeClamp;
gpu::copy_to_symbol(filter_op,op);
plan->temp_image.resize(img.width(), img.height());
if(flags & VERBOSE)
timer = &timers.gpu_add("initialize prefilter");
switch(op.pre_filter)
{
case FILTER_BSPLINE3:
case FILTER_CARDINAL_BSPLINE3:
init_pre_filter(&bspline3);
break;
case FILTER_MITCHELL_NETRAVALI:
init_pre_filter(&mitchell_netravali);
break;
}
if(timer)
timer->stop();
switch(op.type)
{
case EFFECT_UNSHARP_MASK:
ccd = cudaCreateChannelDesc<float>();
cudaMallocArray(&plan->a_aux_float, &ccd, img.width(), img.height());
check_cuda_error("cudaMallocArray");
plan->img_aux_float_orig.resize(img.width(), img.height());
luminance(&plan->img_aux_float_orig, &preproc_img);
plan->gaussblur_plan
= gaussian_blur_create_plan(img.width(), img.height(),
img.rowstride(), op.sigma);
plan->img_aux_float.resize(img.width(), img.height());
t_aux_float.normalized = false;
t_aux_float.filterMode = cudaFilterModeLinear;
t_aux_float.addressMode[0] = t_aux_float.addressMode[1] = cudaAddressModeClamp;
break;
}
return plan;
}/*}}}*/
void free(filter_plan *plan)/*{{{*/
{
delete plan;
}/*}}}*/
template <class S, effect_type OP,int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F1*BH_F1, NB_F1)
#endif
void filter_kernel_ss1(dimage_ptr<typename sum_traits<C>::type,KS*KS> out)/*{{{*/
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F1+tx, y = blockIdx.y*BH_F1+ty;
if(!out.is_inside(x,y))
return;
// output will point to the pixel we're processing now
int idx = out.offset_at(x,y);
out += idx;
// we're using some smem as registers not to blow up the register space,
// here we define how much 'registers' are in smem, the rest is used
// in regular registers
typedef filter_traits<C> cfg;
typedef typename sum_traits<C>::type sum_type;
typedef typename pixel_traits<float,C>::pixel_type pixel_type;
const int SMEM_SIZE = cfg::smem_size,
REG_SIZE = KS*KS-SMEM_SIZE;
__shared__ sum_type _sum[BH_F1][SMEM_SIZE][BW_F1];
sum_type (*ssum)[BW_F1] = (sum_type (*)[BW_F1]) &_sum[ty][0][tx];
sum_type sum[REG_SIZE];
// Init registers to zero
for(int i=0; i<REG_SIZE; ++i)
sum[i] = sum_traits<C>::make_pixel(0);
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
*ssum[i] = sum_traits<C>::make_pixel(0);
// top-left position of the kernel support
float2 p = make_float2(x,y)-1.5f+0.5f;
float *bspline3 = c_prefilter_data;
S sampler;
for(int s=0; s<SAMPDIM; ++s)
{
#if SAMPDIM==1
pixel_type value = do_filter<OP>(sampler, p);
#else
pixel_type value = do_filter<OP>(sampler, p+c_blue_noise[s]);
#endif
value = srgb2lrgb(value);
// scans through the kernel support, collecting data for each position
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
{
float wij = bspline3[i];
*ssum[i] += sum_traits<C>::make_pixel(value*wij, wij);
}
bspline3 += SMEM_SIZE;
#pragma unroll
for(int i=0; i<REG_SIZE; ++i)
{
float wij = bspline3[i];
sum[i] += sum_traits<C>::make_pixel(value*wij, wij);
}
bspline3 += REG_SIZE;
}
// writes out to gmem what's in the registers
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
*out[i] = *ssum[i];
#pragma unroll
for(int i=0; i<REG_SIZE; ++i)
*out[SMEM_SIZE+i] = sum[i];
}/*}}}*/
template <class S, effect_type OP,int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F1*BH_F1, NB_F1)
#endif
void filter_kernel1(dimage_ptr<typename sum_traits<C>::type,KS*KS> out)/*{{{*/
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F1+tx, y = blockIdx.y*BH_F1+ty;
if(!out.is_inside(x,y))
return;
// output will point to the pixel we're processing now
int idx = out.offset_at(x,y);
out += idx;
// we're using some smem as registers not to blow up the register space,
// here we define how much 'registers' are in smem, the rest is used
// in regular registers
typedef filter_traits<C> cfg;
typedef typename sum_traits<C>::type sum_type;
typedef typename pixel_traits<float,C>::pixel_type pixel_type;
const int SMEM_SIZE = cfg::smem_size,
REG_SIZE = KS*KS-SMEM_SIZE;
__shared__ sum_type _sum[BH_F1][SMEM_SIZE][BW_F1];
sum_type (*ssum)[BW_F1] = (sum_type (*)[BW_F1]) &_sum[ty][0][tx];
sum_type sum[REG_SIZE];
// Init registers to zero
for(int i=0; i<REG_SIZE; ++i)
sum[i] = sum_traits<C>::make_pixel(0);
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
*ssum[i] = sum_traits<C>::make_pixel(0);
// top-left position of the kernel support
float2 p = make_float2(x,y)-1.5f+0.5f;
float *bspline3 = c_prefilter_data;
S sampler;
pixel_type value = do_filter<OP>(sampler, p);
value = srgb2lrgb(value);
// scans through the kernel support, collecting data for each position
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
{
float wij = bspline3[i];
*ssum[i] += sum_traits<C>::make_pixel(value*wij, wij);
}
bspline3 += SMEM_SIZE;
#pragma unroll
for(int i=0; i<REG_SIZE; ++i)
{
float wij = bspline3[i];
sum[i] += sum_traits<C>::make_pixel(value*wij, wij);
}
// writes out to gmem what's in the registers
#pragma unroll
for(int i=0; i<SMEM_SIZE; ++i)
*out[i] = *ssum[i];
#pragma unroll
for(int i=0; i<REG_SIZE; ++i)
*out[SMEM_SIZE+i] = sum[i];
}/*}}}*/
template <class S, effect_type OP,int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F1*BH_F1, NB_F1)
#endif
void filter_kernel_box_ss(dimage_ptr<float,C> out)/*{{{*/
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F1+tx, y = blockIdx.y*BH_F1+ty;
if(!out.is_inside(x,y))
return;
// output will point to the pixel we're processing now
int idx = out.offset_at(x,y);
out += idx;
// unlike the two-pass kernels there is no shared-memory staging here: the
// supersampled average is accumulated in registers and written directly to
// the output image
typedef filter_traits<C> cfg;
typedef typename sum_traits<C>::type sum_type;
typedef typename pixel_traits<float,C>::pixel_type pixel_type;
pixel_type sum = pixel_traits<float,C>::make_pixel(0);
// top-left position of the kernel support
float2 p = make_float2(x,y)+0.5f;
S sampler;
for(int s=0; s<SAMPDIM; ++s)
{
#if SAMPDIM == 1
pixel_type value = do_filter<OP>(sampler, p);
#else
pixel_type value = do_filter<OP>(sampler, p+c_blue_noise[s]);
#endif
sum += srgb2lrgb(value);
}
*out = sum/float(SAMPDIM);
}/*}}}*/
template <class S, effect_type OP,int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F1*BH_F1, NB_F1)
#endif
void filter_kernel_box(dimage_ptr<float,C> out)/*{{{*/
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F1+tx, y = blockIdx.y*BH_F1+ty;
if(!out.is_inside(x,y))
return;
// output will point to the pixel we're processing now
int idx = out.offset_at(x,y);
out += idx;
// unlike the two-pass kernels there is no shared-memory staging here: the
// filtered value is computed and written directly to the output image
typedef filter_traits<C> cfg;
typedef typename sum_traits<C>::type sum_type;
typedef typename pixel_traits<float,C>::pixel_type pixel_type;
// top-left position of the kernel support
float2 p = make_float2(x,y)+0.5f;
S sampler;
*out = srgb2lrgb(do_filter<OP>(sampler, p));
}/*}}}*/
template <int C>
__global__
#if USE_LAUNCH_BOUNDS
__launch_bounds__(BW_F2*BH_F2, NB_F2)
#endif
void filter_kernel2(dimage_ptr<float,C> out, /*{{{*/
dimage_ptr<const typename sum_traits<C>::type,KS*KS> in)
{
int tx = threadIdx.x, ty = threadIdx.y;
int x = blockIdx.x*BW_F2+tx, y = blockIdx.y*BH_F2+ty;
// out of bounds? goodbye
if(!in.is_inside(x,y))
return;
// in and out points to the input/output pixel we're processing
int idx = in.offset_at(x,y);
in += idx;
out += idx;
// treat corner cases where the support is outside the image
int mi = min(y+KS,in.height())-y,
mj = min(x+KS,in.width())-x;
// sum the contribution of nearby pixels
typename sum_traits<C>::type sum = sum_traits<C>::make_pixel(0);
#pragma unroll
for(int i=0; i<mi; ++i)
{
#pragma unroll
for(int j=0; j<mj; ++j)
{
sum += *in[i*KS+j];
++in;
}
in += in.rowstride()-mj;
}
*out = filter_traits<C>::normalize_sum(sum);
}/*}}}*/
template <class POST_FILTER, int C>
void filter(filter_plan *_plan, dimage_ptr<float,C> out, const filter_operation &op)/*{{{*/
{
filter_plan_C<C> *plan = dynamic_cast<filter_plan_C<C> *>(_plan);
assert(plan != NULL);
if(plan->op.post_filter != op.post_filter)
throw std::runtime_error("Postfilter changed, plan must be recreated");
gpu::copy_to_symbol(filter_op,op);
typedef filter_traits<C> cfg;
assert(plan->temp_image.width() == out.width() &&
plan->temp_image.height() == out.height());
cudaBindTextureToArray(cfg::tex(), plan->a_in);
dim3 bdim(BW_F1,BH_F1),
gdim((out.width()+bdim.x-1)/bdim.x, (out.height()+bdim.y-1)/bdim.y);
typedef filter_traits<C> cfg;
base_timer *timer = NULL;
#define CASE(EFFECT) \
case EFFECT:\
if(plan->flags & VERBOSE)\
timer = &timers.gpu_add("First pass",out.width()*out.height(),"P");\
if(op.pre_filter == FILTER_BOX) \
{ \
if(op.use_supersampling) \
filter_kernel_box_ss<POST_FILTER,EFFECT><<<gdim, bdim>>>(out); \
else \
filter_kernel_box<POST_FILTER,EFFECT><<<gdim, bdim>>>(out); \
} \
else \
{ \
if(op.use_supersampling) \
filter_kernel_ss1<POST_FILTER,EFFECT,C><<<gdim, bdim>>>(&plan->temp_image); \
else \
filter_kernel1<POST_FILTER,EFFECT,C><<<gdim, bdim>>>(&plan->temp_image); \
}\
if(timer)\
timer->stop();\
break
switch(op.type)
{
CASE(EFFECT_IDENTITY);
CASE(EFFECT_POSTERIZE);
CASE(EFFECT_SCALE);
CASE(EFFECT_BIAS);
CASE(EFFECT_ROOT);
CASE(EFFECT_THRESHOLD);
CASE(EFFECT_REPLACEMENT);
CASE(EFFECT_GRADIENT_EDGE_DETECTION);
CASE(EFFECT_LAPLACIAN);
CASE(EFFECT_LAPLACE_EDGE_ENHANCEMENT);
CASE(EFFECT_YAROSLAVSKY_BILATERAL);
CASE(EFFECT_BRIGHTNESS_CONTRAST);
CASE(EFFECT_HUE_SATURATION_LIGHTNESS);
CASE(EFFECT_BILATERAL);
CASE(EFFECT_EMBOSS);
case EFFECT_UNSHARP_MASK:
assert(plan->a_aux_float != NULL);
if(plan->flags & VERBOSE)
timer = &timers.gpu_add("Gaussian blur on Y channel",out.width()*out.height(),"P");
update_plan(plan->gaussblur_plan,
plan->img_aux_float_orig.width(),
plan->img_aux_float_orig.height(),
plan->img_aux_float_orig.rowstride(),
op.sigma);
gaussian_blur(plan->gaussblur_plan, &plan->img_aux_float,
&plan->img_aux_float_orig);
if(timer)
timer->stop();
copy_to_array(plan->a_aux_float, dimage_ptr<const float>(&plan->img_aux_float));
cudaBindTextureToArray(t_aux_float, plan->a_aux_float);
if(plan->flags & VERBOSE)
timer = &timers.gpu_add("First pass",out.width()*out.height(),"P");
if(op.pre_filter == FILTER_BOX)
{
if(op.use_supersampling)
filter_kernel_box_ss<POST_FILTER,EFFECT_UNSHARP_MASK><<<gdim, bdim>>>(out);
else
filter_kernel_box<POST_FILTER,EFFECT_UNSHARP_MASK><<<gdim, bdim>>>(out);
}
else
{
if(op.use_supersampling)
filter_kernel_ss1<POST_FILTER,EFFECT_UNSHARP_MASK,C><<<gdim, bdim>>>(&plan->temp_image);
else
filter_kernel1<POST_FILTER,EFFECT_UNSHARP_MASK,C><<<gdim, bdim>>>(&plan->temp_image);
}
if(timer)
timer->stop();
cudaUnbindTexture(t_aux_float);
break;
default:
assert(false);
}
#undef CASE
if(op.pre_filter != FILTER_BOX)
{
if(plan->flags & VERBOSE)
timer = &timers.gpu_add("Second pass",out.width()*out.height(),"P");
dim3 bdim(BW_F2,BH_F2),
gdim((out.width()+bdim.x-1)/bdim.x,(out.height()+bdim.y-1)/bdim.y);
filter_kernel2<C><<<gdim, bdim>>>(out, &plan->temp_image);
if(timer)
timer->stop();
}
cudaUnbindTexture(cfg::tex());
if(op.pre_filter == FILTER_CARDINAL_BSPLINE3)
{
if(plan->flags & VERBOSE)
timer = &timers.gpu_add("Convolve with bspline3^-1",out.width()*out.height(),"P");
// convolve with a bspline3^-1 to make a cardinal pre-filter
for(int i=0; i<C; ++i)
recfilter5(plan->prefilter_recfilter_plan, out[i]);
if(timer)
timer->stop();
}
// maps back to gamma space
lrgb2srgb(out, out);
}/*}}}*/
template <int C>
void filter(filter_plan *plan, dimage_ptr<float,C> out, const filter_operation &op)/*{{{*/
{
typedef filter_traits<C> cfg;
typedef typename cfg::texfetch_type texfetch;
switch(op.post_filter)
{
case FILTER_BSPLINE3:
case FILTER_CARDINAL_BSPLINE3:
filter<cubic_sampler<bspline3_weights, texfetch>,C>(plan, out, op);
break;
case FILTER_MITCHELL_NETRAVALI:
filter<cubic_sampler<mitchell_netravali_weights,texfetch>, C>(plan, out, op);
break;
case FILTER_BOX:
filter<box_sampler<texfetch>, C>(plan, out, op);
break;
case FILTER_SACHT_NEHAB3:
filter<cubic_sampler<sacht_nehab3_weights, texfetch>,C>(plan, out, op);
break;
default:
assert(false);
}
}
// Grayscale filtering ===================================================/*{{{*/
texture<float, 2, cudaReadModeElementType> t_in_gray;
struct texfetch_gray
{
typedef float result_type;
__device__ float operator()(float x, float y)
{
return tex2D(t_in_gray, x, y);
}
};
template <>
struct filter_traits<1>
{
typedef texfetch_gray texfetch_type;
static const int smem_size = 3;
static
texture<float,2,cudaReadModeElementType> &tex() { return t_in_gray; }
__device__ static float normalize_sum(float2 sum)
{
return sum.x / sum.y;
}
};
template<>
void copy_to_array(cudaArray *out, dimage_ptr<const float> in)
{
cudaMemcpy2DToArray(out, 0, 0, in,
in.rowstride()*sizeof(float),
in.width()*sizeof(float), in.height(),
cudaMemcpyDeviceToDevice);
}
template
void filter(filter_plan *, dimage_ptr<float,1> img, const filter_operation &op);
template
filter_plan *
filter_create_plan(dimage_ptr<const float,1> img, const filter_operation &op,
int flags);
/*}}}*/
//{{{ RGB filtering =========================================================
texture<float4, 2, cudaReadModeElementType> t_in_rgba;
struct texfetch_rgba
{
typedef float3 result_type;
__device__ float3 operator()(float x, float y)
{
return make_float3(tex2D(t_in_rgba, x, y));
}
};
template <>
struct filter_traits<3>
{
typedef texfetch_rgba texfetch_type;
#if CUDA_SM >= 20
static const int smem_size = 5;
#else
static const int smem_size = 3;
#endif
static int flags;
static texture<float4,2,cudaReadModeElementType> &tex()
{ return t_in_rgba; }
__device__ static float3 normalize_sum(float4 sum)
{
return make_float3(sum) / sum.w;
}
};
template <>
void copy_to_array(cudaArray *out, dimage_ptr<const float,3> img)
{
dimage<float3> temp;
temp.resize(img.width(), img.height());
convert(&temp, img);
cudaMemcpy2DToArray(out, 0, 0, temp,
temp.rowstride()*sizeof(float4),
temp.width()*sizeof(float4), temp.height(),
cudaMemcpyDeviceToDevice);
}
template
void filter(filter_plan *, dimage_ptr<float,3> img, const filter_operation &op);
template
filter_plan *
filter_create_plan(dimage_ptr<const float,3> img, const filter_operation &op,
int flags);
/*}}}*/
|
aa6ba3ee890c963762cd9423246bba498bb9afc8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int ty = threadIdx.y + blockIdx.y * blockDim.y;
// guard against threads outside the image: the launch below rounds the grid
// up, so without this check edge blocks would read and write out of bounds
if (tx >= numCols || ty >= numRows)
return;
uchar4 image = rgbaImage[tx + ty * numCols];
float R = image.x;
float G = image.y;
float B = image.z;
float A = image.w;
greyImage[tx + ty * numCols] = 0.299f * R + 0.587f * G + 0.114f * B;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
int block_width = 16;
int block_height = 16;
int grid_width = numCols / block_width;
if (grid_width * block_width < numCols)
grid_width++;
int grid_height = numRows / block_height;
if (grid_height * block_height < numRows)
grid_height++;
const dim3 blockSize(block_width, block_height ); //TODO
const dim3 gridSize( grid_width, grid_height ); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
|
aa6ba3ee890c963762cd9423246bba498bb9afc8.cu
|
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int ty = threadIdx.y + blockIdx.y * blockDim.y;
// guard against threads outside the image: the launch below rounds the grid
// up, so without this check edge blocks would read and write out of bounds
if (tx >= numCols || ty >= numRows)
return;
uchar4 image = rgbaImage[tx + ty * numCols];
float R = image.x;
float G = image.y;
float B = image.z;
float A = image.w;
greyImage[tx + ty * numCols] = 0.299f * R + 0.587f * G + 0.114f * B;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
int block_width = 16;
int block_height = 16;
int grid_width = numCols / block_width;
if (grid_width * block_width < numCols)
grid_width++;
int grid_height = numRows / block_height;
if (grid_height * block_height < numRows)
grid_height++;
const dim3 blockSize(block_width, block_height ); //TODO
const dim3 gridSize( grid_width, grid_height ); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
21d364b132d2f2114a4fb437effae3c20ec7e5f6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void MyKernel(float* devPtr, size_t pitch, int width, int height) {
for (int r = 0; r < height; r++) {
float* row = (float*)((char*)devPtr + r * pitch);
for (int c = 0; c < width; c++) {
float element = row[c];
}
}
}
int main() {
//host code
int width = 64, height = 64;
float* devPtr;
size_t pitch;
hipMallocPitch(&devPtr, &pitch, width * sizeof(float), height);
MyKernel << <100, 512 >> > (devPtr, pitch, width, height);
}
|
21d364b132d2f2114a4fb437effae3c20ec7e5f6.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void MyKernel(float* devPtr, size_t pitch, int width, int height) {
for (int r = 0; r < height; r++) {
float* row = (float*)((char*)devPtr + r * pitch);
for (int c = 0; c < width; c++) {
float element = row[c];
}
}
}
int main() {
//host code
int width = 64, height = 64;
float* devPtr;
size_t pitch;
cudaMallocPitch(&devPtr, &pitch, width * sizeof(float), height);
MyKernel << <100, 512 >> > (devPtr, pitch, width, height);
}
|
62e3c1a4cfaaaac098c3f0993be9177734d5dcbf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This is the CUDA implementation of a close approximation of the morphological
* Laplacian operator edge detection filter, along with other filters discovered
* by experimentation. The CUDA SDK sample Box Filter was used as a base to
* modify and expand on, and the copyright verbiage for the code still present
* is included below as requested by NVIDIA.
*
* Ian Calegory, 12/20/2016
*/
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
#ifndef _LAPLACIANFILTER_KERNEL_H_
#define _LAPLACIANFILTER_KERNEL_H_
//#include <array>
//#include <cstdlib>
#include "laplacianFilter.h"
#include <helper_math.h>
#include <helper_functions.h>
texture<float, 2> tex;
texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaTex;
hipArray *d_array, *d_tempArray;
const int CHANNEL_COUNT = 4;
int disk3x3StructuringElement[] =
{
0, 1, 0,
1, 1, 1,
0, 1, 0
};
int disk5x5StructuringElement[] =
{
0, 1, 1, 1, 0,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
0, 1, 1, 1, 0
};
int disk7x7StructuringElement[] =
{
0, 0, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 0, 0
};
int square3x3StructuringElement[] =
{
1, 1, 1,
1, 1, 1,
1, 1, 1
};
int square5x5StructuringElement[] =
{
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1
};
int square7x7StructuringElement[] =
{
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1
};
int ring3x3StructuringElement[] =
{
0, 1, 0,
1, 0, 1,
0, 1, 0
};
int ring5x5StructuringElement[] =
{
0, 1, 1, 1, 0,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
0, 1, 1, 1, 0
};
int ring7x7StructuringElement[] =
{
0, 0, 1, 1, 1, 0, 0,
0, 1, 0, 0, 0, 1, 0,
1, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 1,
0, 1, 0, 0, 0, 1, 0,
0, 0, 1, 1, 1, 0, 0
};
// C++11 style arrays are not easy to use in device code
//std::array<std::array<int, 3>, 3> disk3x3StructuringElement{ {
// { { 0, 1, 0 } },
// { { 1, 1, 1 } },
// { { 0, 1, 0 } }
// } };
// Would be nice to be able to use this or something like it:
//auto &structuringElement = disk5x5StructuringElement;
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// convert floating point rgba color to 32-bit integer
__device__ unsigned int rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return ((unsigned int)(rgba.w * 255.0f) << 24) |
((unsigned int)(rgba.z * 255.0f) << 16) |
((unsigned int)(rgba.y * 255.0f) << 8) |
((unsigned int)(rgba.x * 255.0f));
}
__device__ float4 rgbaIntToFloat(unsigned int c)
{
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
extern "C"
void initTexture(int width, int height, void *pImage, bool useRGBA)
{
int size = width * height * (useRGBA ? sizeof(uchar4) : sizeof(float));
// copy image data to array
hipChannelFormatDesc channelDesc;
if (useRGBA)
{
channelDesc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned);
}
else
{
channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
}
checkCudaErrors(hipMallocArray(&d_array, &channelDesc, width, height));
checkCudaErrors(hipMemcpyToArray(d_array, 0, 0, pImage, size, hipMemcpyHostToDevice));
checkCudaErrors(hipMallocArray(&d_tempArray, &channelDesc, width, height));
// set texture parameters
tex.addressMode[0] = hipAddressModeClamp;
tex.addressMode[1] = hipAddressModeClamp;
tex.filterMode = hipFilterModePoint;
tex.normalized = true;
// Bind the array to the texture
if (useRGBA)
{
checkCudaErrors(hipBindTextureToArray(rgbaTex, d_array, channelDesc));
}
else
{
checkCudaErrors(hipBindTextureToArray(tex, d_array, channelDesc));
}
}
extern "C"
void freeTextures()
{
checkCudaErrors(hipFreeArray(d_array));
checkCudaErrors(hipFreeArray(d_tempArray));
}
// This is used to convert tex2D() call results into the byte components
__device__ void convertTexFloatToUChar(uchar4* dst, const float4 src)
{
//const unsigned int idx = getTextureIndex();
//_dst[idx].x = (unsigned char)(_src[idx].x * 255.9999f);
//_dst[idx].y = (unsigned char)(_src[idx].y * 255.9999f);
//_dst[idx].z = (unsigned char)(_src[idx].z * 255.9999f);
//_dst[idx].w = (unsigned char)(_src[idx].w * 255.9999f);
(*dst).x = (unsigned char)(src.x * 255.9999f);
(*dst).y = (unsigned char)(src.y * 255.9999f);
(*dst).z = (unsigned char)(src.z * 255.9999f);
(*dst).w = (unsigned char)(src.w * 255.9999f);
}
/*
Perform 2D morphological Laplacian operator (approximately? along with a number
of variations) on image using CUDA
This works by calculating the dilation and erosion of the image using the structuring
element centered on the current pixel being processed. It's passed in as the array
d_structuringElement, which is a 2d array flattened into a 1d array for passing into
CUDA with device hipMemcpyHostToDevice calls. Dilation is computed by finding the
maximum r, g, and b values for the pixels around the current pixel determined by the
mask of the structuring element. (If the and of the masking structuring element pixel
and the source image pixel in the corresponding position with the mask overlaid onto
the source image is 1, include that pixel in the source of pixels for choosing maximum
values.)
Erosion is computed similarly, though replacing the source pixel with the components
having the minimum instead of maximum values.
Dilation results in what's called an internal gradient, while erosion results in an
external gradient. For further reference on computing the internal and external
gradients, see for example http://www.inf.u-szeged.hu/ssip/1996/morpho/morphology.html
The grayscale filter uses the luminosity algorithm for converting to grayscale:
0.21 R + 0.72 G + 0.07 B
--Ian Calegory, 12/20/2016
// Comment from original box filter left here for reference--so as a reminder to check
// for coalescence
Note that the x (row) pass suffers from uncoalesced global memory reads,
since each thread is reading from a different row. For this reason it is
better to use texture lookups for the x pass.
The y (column) pass is perfectly coalesced.
Parameters:
id - pointer to input image in device memory (not used here--texture is used instead)
od - pointer to destination image in device memory
w - image width
h - image height
d_structuringElement - element 0 of the structuring element array
n - structuring element is nxn matrix
*/
__global__ void
d_laplacianFilter_rgba(unsigned char *id, unsigned char *od, int w, int h, FilterTypeEnum filter, int* d_structuringElement, unsigned int n)
{
unsigned int colIndex = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int rowIndex = blockIdx.y*blockDim.y + threadIdx.y;
if (rowIndex < h && colIndex < w) {
unsigned int index = rowIndex*w*CHANNEL_COUNT + colIndex*CHANNEL_COUNT;
//if (colIndex > 1085)
//printf("colIndex, %d, rowIndex, %d, pixelContents, %d, textureContents, %d\n", colIndex, rowIndex, *(id + index), tex2D(rgbaTex, colIndex, rowIndex));
//printf("w, %d, h, %d, colIndex, %d, rowIndex, %d\n", w, h, colIndex, rowIndex);
// Convert float4 texture info to uchar4 to extract r, g, b, and a components
float4 texelCenter = tex2D(rgbaTex, colIndex, rowIndex);
uchar4 bytesCenterPixel;
bytesCenterPixel.x = (unsigned char)(texelCenter.x * 255.9999f);
bytesCenterPixel.y = (unsigned char)(texelCenter.y * 255.9999f);
bytesCenterPixel.z = (unsigned char)(texelCenter.z * 255.9999f);
bytesCenterPixel.w = (unsigned char)(texelCenter.w * 255.9999f);
//printf("r=%d, g=%d, b=%d, a=%d, texel=%d ", bytes.x, bytes.y, bytes.z, bytes.w, texel);
// Now cycle through every pixel of the structuring element, and process
// both dilation and erosion of the original image.
unsigned char rMin = 255, gMin = 255, bMin = 255;
unsigned char rMax = 0, gMax = 0, bMax = 0;
int maxVert = n / 2;
// Treat k as the structuring element's x coordinate
for (int k = -maxVert; k <= maxVert; k++)
{
int maxHoriz = n / 2;
// Treat l as the structuring element's y coordinate
for (int l = -maxHoriz; l <= maxHoriz; l++)
{
// Make sure that the structuring element has a value of 1 in the position being processed,
// and that the point with the structuring element overlaid is also inside the bounds of the image.
if (d_structuringElement[(k + maxVert)*n + (l + maxHoriz)] == 1 && rowIndex + k >= 0 && rowIndex + k < h && colIndex + l >= 0 && colIndex + l < w)
{
// Determine offset [NOT USED HERE SINCE CUDA VERSION USES TEXTURE INSTEAD OF RAW MEMORY]
//int offset = k*w*CHANNEL_COUNT + l*CHANNEL_COUNT;
// Convert float4 texture info to uchar4 to extract r, g, b, and a components
float4 texel = tex2D(rgbaTex, colIndex + l, rowIndex + k);
// bytes.x = red, bytes.y = green, bytes.z = blue, bytes.w = alpha
uchar4 bytes;
convertTexFloatToUChar(&bytes, texel);
//printf("r=%d, g=%d, b=%d, a=%d, texel=%d; ", bytes.x, bytes.y, bytes.z, bytes.w, texel);
// Checks for dilation
if (bytes.x > rMax)
rMax = bytes.x;
if (bytes.y > gMax)
gMax = bytes.y;
if (bytes.z > bMax)
bMax = bytes.z;
// Checks for erosion
if (bytes.x < rMin)
rMin = bytes.x;
if (bytes.y < gMin)
gMin = bytes.y;
if (bytes.z < bMin)
bMin = bytes.z;
/*
// The following method is for raw image memory manipulation by pointers and offsets
// from the CPU version
// Checks for dilation
if ((*(id + index + offset)) > rMax)
rMax = (unsigned char)(*(id + index + offset));
if ((*(id + index + offset + 1)) > gMax)
gMax = (unsigned char)(*(id + index + offset + 1));
if ((*(id + index + offset + 2)) > bMax)
bMax = (unsigned char)(*(id + index + offset + 2));
// Checks for erosion
if ((*(id + index + offset)) < rMin)
rMin = (unsigned char)(*(id + index + offset));
if ((*(id + index + offset + 1)) < gMin)
gMin = (unsigned char)(*(id + index + offset + 1));
if ((*(id + index + offset + 2)) < bMin)
bMin = (unsigned char)(*(id + index + offset + 2));
*/
}
}
}
// Filter according to which filter is selected
switch(filter)
{
case(FilterTypeEnum::AlmostAReference):
// This is very succinct and crisp and clear! Mostly black, which outlines etched in sharp white
// THE BEST OUT OF ALL OF THEM -- and, the closest to the reference Laplacian image provided
*(od + index) = ((rMax + rMin) / 2 - bytesCenterPixel.x) >= 0 ? (unsigned char)((rMax + rMin) / 2 - bytesCenterPixel.x) : 0;
(*(od + index + 1)) = ((gMax + gMin) / 2 - bytesCenterPixel.y) >= 0 ? (unsigned char)((gMax + gMin) / 2 - bytesCenterPixel.y) : 0;
(*(od + index + 2)) = ((bMax + bMin) / 2 - bytesCenterPixel.z) >= 0 ? (unsigned char)((bMax + bMin) / 2 - bytesCenterPixel.z) : 0;
break;
case(FilterTypeEnum::AlmostFlattened):
// Looks like very succinct three shades of gray
// This is a luminosity-type conversion to grayscale
unsigned char red = (unsigned char)((((rMax + rMin) / 2 - bytesCenterPixel.x)/2 + 255)*0.21);
unsigned char green = (unsigned char)((((gMax + gMin) / 2 - bytesCenterPixel.y)/2 + 255)*0.72);
unsigned char blue = (unsigned char)((((bMax + bMin) / 2 - bytesCenterPixel.z)/2 + 255)*0.07);
//*dst = ((rMax + rMin) / 2 - *index) >= 0 ? red+green+blue : 0;
//(*(dst + 1)) = ((gMax + gMin) / 2 - (*(index + 1))) >= 0 ? red+green+blue : 0;
//(*(dst + 2)) = ((bMax + bMin) / 2 - (*(index + 2))) >= 0 ? red+green+blue : 0;
*(od + index) = red + green + blue;
(*(od + index + 1)) = red + green + blue;
(*(od + index + 2)) = red + green + blue;
break;
case(FilterTypeEnum::AntiAliasingSmoothFuzz):
// Excellent and very succinct outlines! Colorizes to blue and yellow (BUT NOT IN THE
// CUDA VERSION FOR SOME REASON!!)
// This is the Laplacian according to http://www.mif.vu.lt/atpazinimas/dip/FIP/fip-Morpholo.html
// which defines it as (dilation+erosion-2*source).
// (Wow, the order of operations of the green and blue commands was mistaken in the CPU version,
// which produced though a really cool filter effect--but oddly does not seem reproducible in
// this CUDA version!)
//(*(od + index + 1)) = (unsigned char)(gMax + gMin - 2 * bytesCenterPixel.y / 2);
//(*(od + index + 2)) = (unsigned char)(bMax + bMin - 2 * bytesCenterPixel.z / 2);
*(od + index) = (unsigned char)((rMax + rMin - 2*bytesCenterPixel.x)/2);
(*(od + index + 1)) = (unsigned char)((gMax + gMin - 2* bytesCenterPixel.y)/2);
(*(od + index + 2)) = (unsigned char)((bMax + bMin - 2* bytesCenterPixel.z)/2);
break;
case(FilterTypeEnum::FuzzInWideOutline):
// This is wrong--used src instead of index, but it produces a unique result--
// good gray outlines, though rest of image is fuzzy. Src is the location
// of the first pixel in the original CPU code, and its behavior is emulated
// here by getting the texel at the 0,0 position.
float4 texel2 = tex2D(rgbaTex, 0, 0);
uchar4 bytes2;
convertTexFloatToUChar(&bytes2, texel2);
*(od + index) = ((rMax + rMin) / 2 - bytes2.x) >= 0 ? ((rMax + rMin) / 2 - bytes2.x) : 0;
(*(od + index + 1)) = ((gMax + gMin) / 2 - bytes2.y) >= 0 ? ((gMax + gMin) / 2 - bytes2.y) : 0;
(*(od + index + 2)) = ((bMax + bMin) / 2 - bytes2.z) >= 0 ? ((bMax + bMin) / 2 - bytes2.z) : 0;
break;
case(FilterTypeEnum::GhostEdges):
// From imageJ (very similar to the clamping method below found in imageJ)
*(od + index) = clamp(rMax - rMin + 128, 0, 255);
(*(od + index + 1)) = clamp(gMax - gMin + 128, 0, 255);
(*(od + index + 2)) = clamp(bMax - bMin + 128, 0, 255);
break;
case(FilterTypeEnum::InvisoWithWideOutlines):
// Excellent results--mostly black except the outlines
*(od + index) = ((rMax - rMin) / 2);
(*(od + index + 1)) = ((gMax - gMin) / 2);
(*(od + index + 2)) = ((bMax - bMin) / 2);
break;
case(FilterTypeEnum::MosaicInGray):
// Now convert to grayscale using luminosity algorithm.
// It produces kind of a grayscale mosaic.
unsigned char red2 = (unsigned char)((rMax + rMin - 2 * bytesCenterPixel.x) / 2) * 0.21;
// Interesting mistake!! (see order of operations of above compared with below)
unsigned char green2 = (unsigned char)(gMax + gMin - 2 * bytesCenterPixel.y / 2) * 0.72;
unsigned char blue2 = (unsigned char)(bMax + bMin - 2 * bytesCenterPixel.z / 2) * 0.07;
unsigned char gray = red2 + green2 + blue2;
*(od + index) = gray;
(*(od + index + 1)) = gray;
(*(od + index + 2)) = gray;
break;
case(FilterTypeEnum::PsychedelicLines):
// Very similar to psychedelic lines, below
*(od + index) = (unsigned char)((rMax + rMin) / 2 - bytesCenterPixel.x);
(*(od + index + 1)) = (unsigned char)((gMax + gMin) / 2 - bytesCenterPixel.y);
(*(od + index + 2)) = (unsigned char)((bMax + bMin) / 2 - bytesCenterPixel.z);
break;
case(FilterTypeEnum::PsychedelicMellowed):
*(od + index) = ((rMax + rMin) / 2 - bytesCenterPixel.x) >= 0 ? ((rMax + rMin) / 2 - bytesCenterPixel.x) + 128 : 0;
(*(od + index + 1)) = ((gMax + gMin) / 2 - bytesCenterPixel.y) >= 0 ? ((gMax + gMin) / 2 - bytesCenterPixel.y) + 128 : 0;
(*(od + index + 2)) = ((bMax + bMin) / 2 - bytesCenterPixel.z) >= 0 ? ((bMax + bMin) / 2 - bytesCenterPixel.z) + 128 : 0;
break;
case(FilterTypeEnum::ReliefInGray):
// Good results, and is very similar to the other SECOND BEST
*(od + index) = clamp((((rMax + rMin) - 2* bytesCenterPixel.x)/2 + 255)/2, 0, 255);
(*(od + index + 1)) = clamp((((gMax + gMin) -2* bytesCenterPixel.y)/2 + 255)/2, 0, 255);
(*(od + index + 2)) = clamp((((bMax + bMin) -2* bytesCenterPixel.z)/2 + 255)/2, 0, 255);
break;
// The following filters produce good results, too, but in most cases are similar to the ones above
// Wow, psychedelic lines!!!
//*dst = clamp((rMax + rMin) / 2 - *index, 0, 255);
//(*(dst + 1)) = clamp((gMax + gMin) / 2 - (*(index+1)), 0, 255);
//(*(dst + 2)) = clamp((bMax + bMin) / 2 - (*(index+2)), 0, 255);
// Almost a black and white result
//*dst = (unsigned char)(((rMax + rMin) / 2 - *index) / 2 + 255);
//(*(dst + 1)) = (unsigned char)(((gMax + gMin) / 2 - (*(index + 1))) / 2 + 255);
//(*(dst + 2)) = (unsigned char)(((bMax + bMin) / 2 - (*(index + 2))) / 2 + 255);
// This block will produce a negative of whatever filter is applied before it
// Now try producing a negative of the Laplacian (or other--whichever is processed immediately
// before this block), above (should be processed subsequently from it):
//*dst = 255 - *dst;
//(*(dst + 1)) = 255 - (*(dst + 1));
//(*(dst + 2)) = 255 - (*(dst + 2));
// This clamping mechanism was found in imageJ
//unsigned char rExternalGradientDilation = clamp(rMax - *index, 0, 255);
//unsigned char gExternalGradientDilation = clamp(gMax - *(index + 1), 0, 255);
//unsigned char bExternalGradientDilation = clamp(bMax - *(index + 2), 0, 255);
//unsigned char rInternalGradientErosion = clamp(rMin - *index, 0, 255);
//unsigned char gInternalGradientErosion = clamp(gMin - *(index + 1), 0, 255);
//unsigned char bInternalGradientErosion = clamp(bMin - *(index + 2), 0, 255);
//*dst = (unsigned char)clamp(rExternalGradientDilation - rInternalGradientErosion + 128, 0, 255);
//(*(dst + 1)) = (unsigned char)clamp(gExternalGradientDilation - gInternalGradientErosion + 128, 0, 255);
//(*(dst + 2)) = (unsigned char)clamp(bExternalGradientDilation - bInternalGradientErosion + 128, 0, 255);
//**** Wow, very good, all gray scale SECOND BEST
//*dst = ((rMax + rMin) / 2 - *index) / 2 + 128;
//(*(dst + 1)) = ((gMax + gMin) / 2 - (*(index + 1))) / 2 + 128;
//(*(dst + 2)) = ((bMax + bMin) / 2 - (*(index + 2))) / 2 + 128;
// Create luminescent bars
//*(od + index) = (blockIdx.x*blockDim.x + threadIdx.x) % 256;
//(*(od + index + 1)) = (blockIdx.x*blockDim.x + threadIdx.x) % 256;
//(*(od + index + 2)) = (blockIdx.x*blockDim.x + threadIdx.x) % 256;
//printf("r=%d, g=%d, b=%d; ", rMax, gMax, bMax);
/*
unsigned char red = (unsigned char)(((rMax + rMin) / 2 - *index)*0.21);
unsigned char green = (unsigned char)(((gMax + gMin) / 2 - (*(index + 1)))*0.72);
unsigned char blue = (unsigned char)(((bMax + bMin) / 2 - (*(index + 2)))*0.07);
//*dst = ((rMax + rMin) / 2 - *index) >= 0 ? red+green+blue : 0;
//(*(dst + 1)) = ((gMax + gMin) / 2 - (*(index + 1))) >= 0 ? red+green+blue : 0;
//(*(dst + 2)) = ((bMax + bMin) / 2 - (*(index + 2))) >= 0 ? red+green+blue : 0;
*dst = red + green + blue;
(*(dst + 1)) = red + green + blue;
(*(dst + 2)) = red + green + blue;
*/
}
}
}
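// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not called anywhere in this file): the
// dilation/erosion arithmetic described above, reduced to a single grayscale
// channel so the (max + min)/2 - center formula is easy to follow without the
// texture machinery. The function name and the hard-coded 3x3 cross element
// are assumptions made only for this example.
static unsigned char laplacianApproxAtPixelHost(const unsigned char *img,
int w, int h, int x, int y)
{
// same layout as disk3x3StructuringElement above
const int se[3][3] = { {0, 1, 0}, {1, 1, 1}, {0, 1, 0} };
unsigned char vMin = 255, vMax = 0;
for (int k = -1; k <= 1; k++)
{
for (int l = -1; l <= 1; l++)
{
// only pixels selected by the mask and lying inside the image take part
if (se[k + 1][l + 1] == 1 && y + k >= 0 && y + k < h && x + l >= 0 && x + l < w)
{
unsigned char v = img[(y + k)*w + (x + l)];
if (v > vMax) vMax = v; // dilation: neighborhood maximum
if (v < vMin) vMin = v; // erosion: neighborhood minimum
}
}
}
// average of dilation and erosion minus the center pixel, clamped at zero,
// mirroring the AlmostAReference branch of d_laplacianFilter_rgba above
int lap = (vMax + vMin) / 2 - img[y*w + x];
return (unsigned char)(lap >= 0 ? lap : 0);
}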
// RGBA version
extern "C"
double laplacianFilterRGBA(unsigned char *d_src, unsigned char *d_temp, unsigned char *d_dest, int width, int height,
int iterations, int nthreads, StopWatchInterface *timer, StructuringElementEnum element, FilterTypeEnum filter) //int structuringElement[], int size)
{
// Copy the array containing the structuring element into the device's memory
// Gotta be an easier way to do this (would be nice if could use C++11 std::array, for example)
// For some reason passing in the array from the host code doesn't work (see the backtracking
// involved with the method signature, above)
unsigned int n = 0;
int *devArray = NULL;
if (element == StructuringElementEnum::disk3x3)
{
n = int(sqrt(sizeof(disk3x3StructuringElement) / sizeof(*disk3x3StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &disk3x3StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
if (element == StructuringElementEnum::disk5x5)
{
n = int(sqrt(sizeof(disk5x5StructuringElement) / sizeof(*disk5x5StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &disk5x5StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
if (element == StructuringElementEnum::disk7x7)
{
n = int(sqrt(sizeof(disk7x7StructuringElement) / sizeof(*disk7x7StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &disk7x7StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::square3x3)
{
n = int(sqrt(sizeof(square3x3StructuringElement) / sizeof(*square3x3StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &square3x3StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::square5x5)
{
n = int(sqrt(sizeof(square5x5StructuringElement) / sizeof(*square5x5StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &square5x5StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::square7x7)
{
n = int(sqrt(sizeof(square7x7StructuringElement) / sizeof(*square7x7StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &square7x7StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::ring3x3)
{
n = int(sqrt(sizeof(ring3x3StructuringElement) / sizeof(*ring3x3StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &ring3x3StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::ring5x5)
{
n = int(sqrt(sizeof(ring5x5StructuringElement) / sizeof(*ring5x5StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &ring5x5StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::ring7x7)
{
n = int(sqrt(sizeof(ring7x7StructuringElement) / sizeof(*ring7x7StructuringElement)));
checkCudaErrors(hipMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(hipMemcpy(devArray, &ring7x7StructuringElement, n*n * sizeof(int), hipMemcpyHostToDevice));
}
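// ---------------------------------------------------------------------------
// Illustrative sketch (not used by the code above or below): the nine nearly
// identical branches above could instead be driven from a small lookup table.
// The struct and table names are assumptions made only for this example.
struct StructuringElementEntry
{
StructuringElementEnum id; // which element the row describes
const int *data; // flattened n x n mask defined at file scope above
unsigned int size; // n, i.e. the mask is size x size
};
static const StructuringElementEntry kStructuringElements[] =
{
{ StructuringElementEnum::disk3x3, disk3x3StructuringElement, 3 },
{ StructuringElementEnum::disk5x5, disk5x5StructuringElement, 5 },
{ StructuringElementEnum::disk7x7, disk7x7StructuringElement, 7 },
{ StructuringElementEnum::square3x3, square3x3StructuringElement, 3 },
{ StructuringElementEnum::square5x5, square5x5StructuringElement, 5 },
{ StructuringElementEnum::square7x7, square7x7StructuringElement, 7 },
{ StructuringElementEnum::ring3x3, ring3x3StructuringElement, 3 },
{ StructuringElementEnum::ring5x5, ring5x5StructuringElement, 5 },
{ StructuringElementEnum::ring7x7, ring7x7StructuringElement, 7 },
};
// Usage sketch, equivalent to whichever branch matched above:
// for (size_t i = 0; i < sizeof(kStructuringElements)/sizeof(kStructuringElements[0]); ++i)
// {
// if (kStructuringElements[i].id == element)
// {
// n = kStructuringElements[i].size;
// checkCudaErrors(hipMalloc((void**)&devArray, n*n*sizeof(int)));
// checkCudaErrors(hipMemcpy(devArray, kStructuringElements[i].data,
// n*n*sizeof(int), hipMemcpyHostToDevice));
// }
// }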
checkCudaErrors(hipBindTextureToArray(rgbaTex, d_array));
// var for kernel computation timing
double dKernelTime;
for (int i = 0; i<iterations; i++)
{
// sync host and start kernel computation timer_kernel
dKernelTime = 0.0;
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&timer);
// use texture for horizontal pass
//d_boxfilter_rgba_x << < height / nthreads, nthreads, 0 >> >(d_temp, width, height, 10);
//d_boxfilter_rgba_y << < width / nthreads, nthreads, 0 >> >(d_temp, d_dest, width, height, 10);
dim3 dimBlock = dim3(16, 16);
// grid x must cover the image columns (width) and grid y the rows (height),
// matching colIndex/rowIndex in the kernel
int xBlocks = width / dimBlock.x + ((width%dimBlock.x) == 0 ? 0 : 1);
int yBlocks = height / dimBlock.y + ((height%dimBlock.y) == 0 ? 0 : 1);
dim3 dimGrid = dim3(xBlocks, yBlocks);
hipLaunchKernelGGL(( d_laplacianFilter_rgba) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_temp, d_dest, width, height, filter, devArray, n);
// sync host and stop computation timer_kernel
checkCudaErrors(hipDeviceSynchronize());
dKernelTime += sdkGetTimerValue(&timer);
if (iterations > 1)
{
// copy result back from global memory to array
checkCudaErrors(hipMemcpyToArray(d_tempArray, 0, 0, d_dest, width * height * sizeof(float), hipMemcpyDeviceToDevice));
checkCudaErrors(hipBindTextureToArray(rgbaTex, d_tempArray));
}
}
return ((dKernelTime / 1000.) / (double)iterations);
}
#endif // #ifndef _LAPLACIANFILTER_KERNEL_H_
|
62e3c1a4cfaaaac098c3f0993be9177734d5dcbf.cu
|
/*
* This is the CUDA implementation of a close approximation of the morphological
* Laplacian operator edge detection filter, along with other filters discovered
* by experimentation. The CUDA SDK sample Box Filter was used as a base to
 * modify and expand on, and the copyright verbiage for the code still present
* is included below as requested by NVIDIA.
*
* ——Ian Calegory, 12/20/2016
*/
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
#ifndef _LAPLACIANFILTER_KERNEL_H_
#define _LAPLACIANFILTER_KERNEL_H_
//#include <array>
//#include <cstdlib>
#include "laplacianFilter.h"
#include <helper_math.h>
#include <helper_functions.h>
texture<float, 2> tex;
texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex;
cudaArray *d_array, *d_tempArray;
const int CHANNEL_COUNT = 4;
int disk3x3StructuringElement[] =
{
0, 1, 0,
1, 1, 1,
0, 1, 0
};
int disk5x5StructuringElement[] =
{
0, 1, 1, 1, 0,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
0, 1, 1, 1, 0
};
int disk7x7StructuringElement[] =
{
0, 0, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 0, 0
};
int square3x3StructuringElement[] =
{
1, 1, 1,
1, 1, 1,
1, 1, 1
};
int square5x5StructuringElement[] =
{
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 1, 1, 1, 1
};
int square7x7StructuringElement[] =
{
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1
};
int ring3x3StructuringElement[] =
{
0, 1, 0,
1, 0, 1,
0, 1, 0
};
int ring5x5StructuringElement[] =
{
0, 1, 1, 1, 0,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
0, 1, 1, 1, 0
};
int ring7x7StructuringElement[] =
{
0, 0, 1, 1, 1, 0, 0,
0, 1, 0, 0, 0, 1, 0,
1, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 1,
0, 1, 0, 0, 0, 1, 0,
0, 0, 1, 1, 1, 0, 0
};
// C++11 style arrays are not easy to use in device code
//std::array<std::array<int, 3>, 3> disk3x3StructuringElement{ {
// { { 0, 1, 0 } },
// { { 1, 1, 1 } },
// { { 0, 1, 0 } }
// } };
// Would be nice to be able to use this or something like it:
//auto &structuringElement = disk5x5StructuringElement;
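// Illustrative note: each mask above is an n x n grid flattened row-major into a 1D array, so the
// entry at (row, col) is structuringElement[row * n + col]; e.g. disk3x3StructuringElement[1*3 + 2]
// is the 1 at the centre-right of the disk. The kernel below indexes the same layout with offsets
// (k, l) measured from the centre: d_structuringElement[(k + n/2)*n + (l + n/2)].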
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// convert floating point rgba color to 32-bit integer
__device__ unsigned int rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return ((unsigned int)(rgba.w * 255.0f) << 24) |
((unsigned int)(rgba.z * 255.0f) << 16) |
((unsigned int)(rgba.y * 255.0f) << 8) |
((unsigned int)(rgba.x * 255.0f));
}
__device__ float4 rgbaIntToFloat(unsigned int c)
{
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
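// Worked example (illustrative): rgbaFloatToInt packs the four saturated channels with x in the
// lowest byte and w in the highest, so opaque red (1,0,0,1) becomes 0xFF0000FF, and
// rgbaIntToFloat(0xFF0000FF) recovers (1,0,0,1) up to rounding.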
extern "C"
void initTexture(int width, int height, void *pImage, bool useRGBA)
{
int size = width * height * (useRGBA ? sizeof(uchar4) : sizeof(float));
// copy image data to array
cudaChannelFormatDesc channelDesc;
if (useRGBA)
{
channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
}
else
{
channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
}
checkCudaErrors(cudaMallocArray(&d_array, &channelDesc, width, height));
checkCudaErrors(cudaMemcpyToArray(d_array, 0, 0, pImage, size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMallocArray(&d_tempArray, &channelDesc, width, height));
// set texture parameters
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
tex.filterMode = cudaFilterModePoint;
tex.normalized = true;
// Bind the array to the texture
if (useRGBA)
{
checkCudaErrors(cudaBindTextureToArray(rgbaTex, d_array, channelDesc));
}
else
{
checkCudaErrors(cudaBindTextureToArray(tex, d_array, channelDesc));
}
}
extern "C"
void freeTextures()
{
checkCudaErrors(cudaFreeArray(d_array));
checkCudaErrors(cudaFreeArray(d_tempArray));
}
// This is used to convert tex2D() call results into the byte components
__device__ void convertTexFloatToUChar(uchar4* dst, const float4 src)
{
//const unsigned int idx = getTextureIndex();
//_dst[idx].x = (unsigned char)(_src[idx].x * 255.9999f);
//_dst[idx].y = (unsigned char)(_src[idx].y * 255.9999f);
//_dst[idx].z = (unsigned char)(_src[idx].z * 255.9999f);
//_dst[idx].w = (unsigned char)(_src[idx].w * 255.9999f);
(*dst).x = (unsigned char)(src.x * 255.9999f);
(*dst).y = (unsigned char)(src.y * 255.9999f);
(*dst).z = (unsigned char)(src.z * 255.9999f);
(*dst).w = (unsigned char)(src.w * 255.9999f);
}
/*
Perform 2D morphological Laplacian operator (approximately? along with a number
of variations) on image using CUDA
This works by calculating the dilation and erosion of the image using the structuring
element centered on the current pixel being processed. It's passed in as the array
d_structuringElement, which is a 2d array flattened into a 1d array for passing into
CUDA with device cudaMemcpyHostToDevice calls. Dilation is computed by finding the
maximum r, g, and b values for the pixels around the current pixel determined by the
mask of the structuring element. (If the and of the masking structuring element pixel
and the source image pixel in the corresponding position with the mask overlaid onto
the source image is 1, include that pixel in the source of pixels for choosing maximum
values.)
Erosion is computed similarly, though replacing the source pixel with the components
having the minimum instead of maximum values.
Dilation results in what's called an internal gradient, while erosion results in an
external gradient. For further reference on computing the internal and external
gradients, see for example http://www.inf.u-szeged.hu/ssip/1996/morpho/morphology.html
The grayscale filter uses the luminosity algorithm for converting to grayscale:
0.21 R + 0.72 G + 0.07 B
--Ian Calegory, 12/20/2016
// Comment from original box filter left here for reference--so as a reminder to check
// for coalescence
Note that the x (row) pass suffers from uncoalesced global memory reads,
since each thread is reading from a different row. For this reason it is
better to use texture lookups for the x pass.
The y (column) pass is perfectly coalesced.
Parameters:
id - pointer to input image in device memory (not used here--texture is used instead)
od - pointer to destination image in device memory
w - image width
h - image height
d_structuringElement - element 0 of the structuring element array
n - structuring element is nxn matrix
*/
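/*
Worked example (illustrative, not part of the original code): for the red channel of a smooth
3x3 neighbourhood
	10 20 30
	40 50 60
	70 80 90
with a full 3x3 square structuring element, dilation gives rMax = 90 and erosion gives rMin = 10,
so the "AlmostAReference" branch below writes (rMax + rMin)/2 - centre = (90 + 10)/2 - 50 = 0 for
this flat ramp, while a genuine edge leaves a large positive residue on its dark side, which is
why that filter produces a mostly black image with bright outlines.
*/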
__global__ void
d_laplacianFilter_rgba(unsigned char *id, unsigned char *od, int w, int h, FilterTypeEnum filter, int* d_structuringElement, unsigned int n)
{
unsigned int colIndex = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int rowIndex = blockIdx.y*blockDim.y + threadIdx.y;
if (rowIndex < h && colIndex < w) {
unsigned int index = rowIndex*w*CHANNEL_COUNT + colIndex*CHANNEL_COUNT;
//if (colIndex > 1085)
//printf("colIndex, %d, rowIndex, %d, pixelContents, %d, textureContents, %d\n", colIndex, rowIndex, *(id + index), tex2D(rgbaTex, colIndex, rowIndex));
//printf("w, %d, h, %d, colIndex, %d, rowIndex, %d\n", w, h, colIndex, rowIndex);
// Convert float4 texture info to uchar4 to extract r, g, b, and a components
float4 texelCenter = tex2D(rgbaTex, colIndex, rowIndex);
uchar4 bytesCenterPixel;
bytesCenterPixel.x = (unsigned char)(texelCenter.x * 255.9999f);
bytesCenterPixel.y = (unsigned char)(texelCenter.y * 255.9999f);
bytesCenterPixel.z = (unsigned char)(texelCenter.z * 255.9999f);
bytesCenterPixel.w = (unsigned char)(texelCenter.w * 255.9999f);
//printf("r=%d, g=%d, b=%d, a=%d, texel=%d ", bytes.x, bytes.y, bytes.z, bytes.w, texel);
// Now cycle through every pixel of the structuring element, and process
// both dilation and erosion of the original image.
unsigned char rMin = 255, gMin = 255, bMin = 255;
unsigned char rMax = 0, gMax = 0, bMax = 0;
int maxVert = n / 2;
// Treat k as the structuring element's x coordinate
for (int k = -maxVert; k <= maxVert; k++)
{
int maxHoriz = n / 2;
// Treat l as the structuring element's y coordinate
for (int l = -maxHoriz; l <= maxHoriz; l++)
{
// Make sure that the structuring element has a value of 1 in the position being processed,
// and that the point with the structuring element overlaid is also inside the bounds of the image.
if (d_structuringElement[(k + maxVert)*n + (l + maxHoriz)] == 1 && (int)rowIndex + k >= 0 && (int)rowIndex + k < h && (int)colIndex + l >= 0 && (int)colIndex + l < w)
{
// Determine offset [NOT USED HERE SINCE CUDA VERSION USES TEXTURE INSTEAD OF RAW MEMORY]
//int offset = k*w*CHANNEL_COUNT + l*CHANNEL_COUNT;
// Convert float4 texture info to uchar4 to extract r, g, b, and a components
float4 texel = tex2D(rgbaTex, colIndex + l, rowIndex + k);
// bytes.x = red, bytes.y = green, bytes.z = blue, bytes.w = alpha
uchar4 bytes;
convertTexFloatToUChar(&bytes, texel);
//printf("r=%d, g=%d, b=%d, a=%d, texel=%d; ", bytes.x, bytes.y, bytes.z, bytes.w, texel);
// Checks for dilation
if (bytes.x > rMax)
rMax = bytes.x;
if (bytes.y > gMax)
gMax = bytes.y;
if (bytes.z > bMax)
bMax = bytes.z;
// Checks for erosion
if (bytes.x < rMin)
rMin = bytes.x;
if (bytes.y < gMin)
gMin = bytes.y;
if (bytes.z < bMin)
bMin = bytes.z;
/*
// The following method is for raw image memory manipulation by pointers and offsets
// from the CPU version
// Checks for dilation
if ((*(id + index + offset)) > rMax)
rMax = (unsigned char)(*(id + index + offset));
if ((*(id + index + offset + 1)) > gMax)
gMax = (unsigned char)(*(id + index + offset + 1));
if ((*(id + index + offset + 2)) > bMax)
bMax = (unsigned char)(*(id + index + offset + 2));
// Checks for erosion
if ((*(id + index + offset)) < rMin)
rMin = (unsigned char)(*(id + index + offset));
if ((*(id + index + offset + 1)) < gMin)
gMin = (unsigned char)(*(id + index + offset + 1));
if ((*(id + index + offset + 2)) < bMin)
bMin = (unsigned char)(*(id + index + offset + 2));
*/
}
}
}
// Filter according to which filter is selected
switch(filter)
{
case(FilterTypeEnum::AlmostAReference):
// This is very succinct and crisp and clear! Mostly black, which outlines etched in sharp white
// THE BEST OUT OF ALL OF THEM -- and, the closest to the reference Laplacian image provided
*(od + index) = ((rMax + rMin) / 2 - bytesCenterPixel.x) >= 0 ? (unsigned char)((rMax + rMin) / 2 - bytesCenterPixel.x) : 0;
(*(od + index + 1)) = ((gMax + gMin) / 2 - bytesCenterPixel.y) >= 0 ? (unsigned char)((gMax + gMin) / 2 - bytesCenterPixel.y) : 0;
(*(od + index + 2)) = ((bMax + bMin) / 2 - bytesCenterPixel.z) >= 0 ? (unsigned char)((bMax + bMin) / 2 - bytesCenterPixel.z) : 0;
break;
case(FilterTypeEnum::AlmostFlattened):
{
// Looks like very succinct three shades of gray
// This is a luminosity-type conversion to grayscale
unsigned char red = (unsigned char)((((rMax + rMin) / 2 - bytesCenterPixel.x)/2 + 255)*0.21);
unsigned char green = (unsigned char)((((gMax + gMin) / 2 - bytesCenterPixel.y)/2 + 255)*0.72);
unsigned char blue = (unsigned char)((((bMax + bMin) / 2 - bytesCenterPixel.z)/2 + 255)*0.07);
//*dst = ((rMax + rMin) / 2 - *index) >= 0 ? red+green+blue : 0;
//(*(dst + 1)) = ((gMax + gMin) / 2 - (*(index + 1))) >= 0 ? red+green+blue : 0;
//(*(dst + 2)) = ((bMax + bMin) / 2 - (*(index + 2))) >= 0 ? red+green+blue : 0;
*(od + index) = red + green + blue;
(*(od + index + 1)) = red + green + blue;
(*(od + index + 2)) = red + green + blue;
break;
}
case(FilterTypeEnum::AntiAliasingSmoothFuzz):
// Excellent and very succinct outlines! Colorizes to blue and yellow (BUT NOT IN THE
// CUDA VERSION FOR SOME REASON!!)
// This is the Laplacian according to http://www.mif.vu.lt/atpazinimas/dip/FIP/fip-Morpholo.html
// which defines it as ½(dilation+erosion-2*source).
// (Wow, the order of operations of the green and blue commands was mistaken in the CPU version,
// which produced though a really cool filter effect--but oddly does not seem reproducible in
// this CUDA version!)
//(*(od + index + 1)) = (unsigned char)(gMax + gMin - 2 * bytesCenterPixel.y / 2);
//(*(od + index + 2)) = (unsigned char)(bMax + bMin - 2 * bytesCenterPixel.z / 2);
*(od + index) = (unsigned char)((rMax + rMin - 2*bytesCenterPixel.x)/2);
(*(od + index + 1)) = (unsigned char)((gMax + gMin - 2* bytesCenterPixel.y)/2);
(*(od + index + 2)) = (unsigned char)((bMax + bMin - 2* bytesCenterPixel.z)/2);
break;
case(FilterTypeEnum::FuzzInWideOutline):
{
// This is wrong--used src instead of index, but it produces a unique result--
// good gray outlines, though rest of image is fuzzy. Src is the location
// of the first pixel in the original CPU code, and its behavior is emulated
// here by getting the texel at the 0,0 position.
float4 texel2 = tex2D(rgbaTex, 0, 0);
uchar4 bytes2;
convertTexFloatToUChar(&bytes2, texel2);
*(od + index) = ((rMax + rMin) / 2 - bytes2.x) >= 0 ? ((rMax + rMin) / 2 - bytes2.x) : 0;
(*(od + index + 1)) = ((gMax + gMin) / 2 - bytes2.y) >= 0 ? ((gMax + gMin) / 2 - bytes2.y) : 0;
(*(od + index + 2)) = ((bMax + bMin) / 2 - bytes2.z) >= 0 ? ((bMax + bMin) / 2 - bytes2.z) : 0;
break;
}
case(FilterTypeEnum::GhostEdges):
// From imageJ (very similar to the clamping method below found in imageJ)
*(od + index) = clamp(rMax - rMin + 128, 0, 255);
(*(od + index + 1)) = clamp(gMax - gMin + 128, 0, 255);
(*(od + index + 2)) = clamp(bMax - bMin + 128, 0, 255);
break;
case(FilterTypeEnum::InvisoWithWideOutlines):
// Excellent results--mostly black except the outlines
*(od + index) = ((rMax - rMin) / 2);
(*(od + index + 1)) = ((gMax - gMin) / 2);
(*(od + index + 2)) = ((bMax - bMin) / 2);
break;
case(FilterTypeEnum::MosaicInGray):
{
// Now convert to grayscale using luminosity algorithm.
// It produces kind of a grayscale mosaic.
unsigned char red2 = (unsigned char)((rMax + rMin - 2 * bytesCenterPixel.x) / 2) * 0.21;
// Interesting mistake!! (see order of operations of above compared with below)
unsigned char green2 = (unsigned char)(gMax + gMin - 2 * bytesCenterPixel.y / 2) * 0.72;
unsigned char blue2 = (unsigned char)(bMax + bMin - 2 * bytesCenterPixel.z / 2) * 0.07;
unsigned char gray = red2 + green2 + blue2;
*(od + index) = gray;
(*(od + index + 1)) = gray;
(*(od + index + 2)) = gray;
break;
}
case(FilterTypeEnum::PsychedelicLines):
// Very similar to psychedelic lines, below
*(od + index) = (unsigned char)((rMax + rMin) / 2 - bytesCenterPixel.x);
(*(od + index + 1)) = (unsigned char)((gMax + gMin) / 2 - bytesCenterPixel.y);
(*(od + index + 2)) = (unsigned char)((bMax + bMin) / 2 - bytesCenterPixel.z);
break;
case(FilterTypeEnum::PsychedelicMellowed):
*(od + index) = ((rMax + rMin) / 2 - bytesCenterPixel.x) >= 0 ? ((rMax + rMin) / 2 - bytesCenterPixel.x) + 128 : 0;
(*(od + index + 1)) = ((gMax + gMin) / 2 - bytesCenterPixel.y) >= 0 ? ((gMax + gMin) / 2 - bytesCenterPixel.y) + 128 : 0;
(*(od + index + 2)) = ((bMax + bMin) / 2 - bytesCenterPixel.z) >= 0 ? ((bMax + bMin) / 2 - bytesCenterPixel.z) + 128 : 0;
break;
case(FilterTypeEnum::ReliefInGray):
// Good results, and is very similar to the other SECOND BEST
*(od + index) = clamp((((rMax + rMin) - 2* bytesCenterPixel.x)/2 + 255)/2, 0, 255);
(*(od + index + 1)) = clamp((((gMax + gMin) -2* bytesCenterPixel.y)/2 + 255)/2, 0, 255);
(*(od + index + 2)) = clamp((((bMax + bMin) -2* bytesCenterPixel.z)/2 + 255)/2, 0, 255);
break;
// The following filters produce good results, too, but in most cases are similar to the ones above
// Wow, psychedelic lines!!!
//*dst = clamp((rMax + rMin) / 2 - *index, 0, 255);
//(*(dst + 1)) = clamp((gMax + gMin) / 2 - (*(index+1)), 0, 255);
//(*(dst + 2)) = clamp((bMax + bMin) / 2 - (*(index+2)), 0, 255);
// Almost a black and white result
//*dst = (unsigned char)(((rMax + rMin) / 2 - *index) / 2 + 255);
//(*(dst + 1)) = (unsigned char)(((gMax + gMin) / 2 - (*(index + 1))) / 2 + 255);
//(*(dst + 2)) = (unsigned char)(((bMax + bMin) / 2 - (*(index + 2))) / 2 + 255);
// This block will produce a negative of whatever filter is applied before it
// Now try producing a negative of the Laplacian (or other--whichever is processed immediately
// before this block), above (should be processed subsequently from it):
//*dst = 255 - *dst;
//(*(dst + 1)) = 255 - (*(dst + 1));
//(*(dst + 2)) = 255 - (*(dst + 2));
// This clamping mechanism was found in imageJ
//unsigned char rExternalGradientDilation = clamp(rMax - *index, 0, 255);
//unsigned char gExternalGradientDilation = clamp(gMax - *(index + 1), 0, 255);
//unsigned char bExternalGradientDilation = clamp(bMax - *(index + 2), 0, 255);
//unsigned char rInternalGradientErosion = clamp(rMin - *index, 0, 255);
//unsigned char gInternalGradientErosion = clamp(gMin - *(index + 1), 0, 255);
//unsigned char bInternalGradientErosion = clamp(bMin - *(index + 2), 0, 255);
//*dst = (unsigned char)clamp(rExternalGradientDilation - rInternalGradientErosion + 128, 0, 255);
//(*(dst + 1)) = (unsigned char)clamp(gExternalGradientDilation - gInternalGradientErosion + 128, 0, 255);
//(*(dst + 2)) = (unsigned char)clamp(bExternalGradientDilation - bInternalGradientErosion + 128, 0, 255);
//**** Wow, very good, all gray scale SECOND BEST
//*dst = ((rMax + rMin) / 2 - *index) / 2 + 128;
//(*(dst + 1)) = ((gMax + gMin) / 2 - (*(index + 1))) / 2 + 128;
//(*(dst + 2)) = ((bMax + bMin) / 2 - (*(index + 2))) / 2 + 128;
// Create luminescent bars
//*(od + index) = (blockIdx.x*blockDim.x + threadIdx.x) % 256;
//(*(od + index + 1)) = (blockIdx.x*blockDim.x + threadIdx.x) % 256;
//(*(od + index + 2)) = (blockIdx.x*blockDim.x + threadIdx.x) % 256;
//printf("r=%d, g=%d, b=%d; ", rMax, gMax, bMax);
/*
unsigned char red = (unsigned char)(((rMax + rMin) / 2 - *index)*0.21);
unsigned char green = (unsigned char)(((gMax + gMin) / 2 - (*(index + 1)))*0.72);
unsigned char blue = (unsigned char)(((bMax + bMin) / 2 - (*(index + 2)))*0.07);
//*dst = ((rMax + rMin) / 2 - *index) >= 0 ? red+green+blue : 0;
//(*(dst + 1)) = ((gMax + gMin) / 2 - (*(index + 1))) >= 0 ? red+green+blue : 0;
//(*(dst + 2)) = ((bMax + bMin) / 2 - (*(index + 2))) >= 0 ? red+green+blue : 0;
*dst = red + green + blue;
(*(dst + 1)) = red + green + blue;
(*(dst + 2)) = red + green + blue;
*/
}
}
}
// RGBA version
extern "C"
double laplacianFilterRGBA(unsigned char *d_src, unsigned char *d_temp, unsigned char *d_dest, int width, int height,
int iterations, int nthreads, StopWatchInterface *timer, StructuringElementEnum element, FilterTypeEnum filter) //int structuringElement[], int size)
{
// Copy the array containing the structuring element into the device's memory
// Gotta be an easier way to do this (would be nice if could use C++11 std::array, for example)
// For some reason passing in the array from the host code doesn't work (see the backtracking
// involved with the method signature, above)
unsigned int n = 0;
int *devArray;
if (element == StructuringElementEnum::disk3x3)
{
n = int(sqrt(sizeof(disk3x3StructuringElement) / sizeof(*disk3x3StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &disk3x3StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::disk5x5)
{
n = int(sqrt(sizeof(disk5x5StructuringElement) / sizeof(*disk5x5StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &disk5x5StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::disk7x7)
{
n = int(sqrt(sizeof(disk7x7StructuringElement) / sizeof(*disk7x7StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &disk7x7StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::square3x3)
{
n = int(sqrt(sizeof(square3x3StructuringElement) / sizeof(*square3x3StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &square3x3StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::square5x5)
{
n = int(sqrt(sizeof(square5x5StructuringElement) / sizeof(*square5x5StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &square5x5StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::square7x7)
{
n = int(sqrt(sizeof(square7x7StructuringElement) / sizeof(*square7x7StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &square7x7StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::ring3x3)
{
n = int(sqrt(sizeof(ring3x3StructuringElement) / sizeof(*ring3x3StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &ring3x3StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::ring5x5)
{
n = int(sqrt(sizeof(ring5x5StructuringElement) / sizeof(*ring5x5StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &ring5x5StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
else if (element == StructuringElementEnum::ring7x7)
{
n = int(sqrt(sizeof(ring7x7StructuringElement) / sizeof(*ring7x7StructuringElement)));
checkCudaErrors(cudaMalloc((void**)&devArray, n*n * sizeof(int)));
checkCudaErrors(cudaMemcpy(devArray, &ring7x7StructuringElement, n*n * sizeof(int), cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaBindTextureToArray(rgbaTex, d_array));
// var for kernel computation timing
double dKernelTime;
for (int i = 0; i<iterations; i++)
{
// sync host and start kernel computation timer_kernel
dKernelTime = 0.0;
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&timer);
// use texture for horizontal pass
//d_boxfilter_rgba_x << < height / nthreads, nthreads, 0 >> >(d_temp, width, height, 10);
//d_boxfilter_rgba_y << < width / nthreads, nthreads, 0 >> >(d_temp, d_dest, width, height, 10);
dim3 dimBlock = dim3(16, 16);
// grid.x must cover the image width (colIndex) and grid.y the height (rowIndex)
int xBlocks = width / dimBlock.x + ((width%dimBlock.x) == 0 ? 0 : 1);
int yBlocks = height / dimBlock.y + ((height%dimBlock.y) == 0 ? 0 : 1);
dim3 dimGrid = dim3(xBlocks, yBlocks);
d_laplacianFilter_rgba <<< dimGrid, dimBlock >>>(d_temp, d_dest, width, height, filter, devArray, n);
// sync host and stop computation timer_kernel
checkCudaErrors(cudaDeviceSynchronize());
dKernelTime += sdkGetTimerValue(&timer);
if (iterations > 1)
{
// copy result back from global memory to array
checkCudaErrors(cudaMemcpyToArray(d_tempArray, 0, 0, d_dest, width * height * sizeof(float), cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaBindTextureToArray(rgbaTex, d_tempArray));
}
}
return ((dKernelTime / 1000.) / (double)iterations);
}
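/*
Usage sketch (illustrative only, not from the original sample; h_img, d_img, d_temp and d_dest are
placeholder buffers assumed to be allocated elsewhere, holding a width x height RGBA byte image):

	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	initTexture(width, height, h_img, true);   // upload the RGBA image into the texture array
	double seconds = laplacianFilterRGBA(d_img, d_temp, d_dest, width, height,
	                                     1, 64, timer, StructuringElementEnum::disk3x3,
	                                     FilterTypeEnum::AlmostAReference);
	freeTextures();
	sdkDeleteTimer(&timer);
*/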
#endif // #ifndef _LAPLACIANFILTER_KERNEL_H_
|
a72efcd1988788649e142316c8db3f7896d33a79.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda_runtime.h>
#include<hip/hip_runtime_api.h>
#include<rocblas.h>
#include<stdio.h>
#include "matcov.h"
#include "matcov_gpu.h"
/* Tuning parameters of tbulateDPHI kernel*/
#define tabDPHI_thread_x (256)
/* Tuning parameters of matcov GPU Kernel */
// Thread block size (x, y),
// max #threads per block is 512 for fermi and 1024 for kepler
#define matcov_thread_x (8)
#define matcov_thread_y (8)
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
//============================================================================================
//================================= AUX FUNCTIONS ============================================
//============================================================================================
void process_error(hipError_t e, const char* str)
{
if(e != hipSuccess)
{
printf("*** Error %s: %s \n", str, hipGetErrorString(e));
exit(1);
}
}
//-----------------------------------------------------------------------
double* arr2dAlloc_gpu(long nbLin, long nbCol)
/* DOCUMENT array = arr2dAlloc(nblin,nbcol)
Allocates a 2d array (double).
*/
{
hipError_t e;
double* tableau;
e = hipMalloc((void**)&tableau, sizeof(double) * nbCol * nbLin);
process_error(e, "gpu alloc tableau2");
return tableau;
}
void arr2dFree_gpu(double *tableau)
/* DOCUMENT arr2dFree(array)
Free a 2d array (double).
*/
{
if(tableau)hipFree(tableau);
}
//============================================================================================
//============================= tabDPHI KERNEL(s) ============================================
//============================================================================================
__device__ double macdo_x56_gpu(double x, int k)
/* DOCUMENT macdo_x56_gpu(x)
Computation of the function
f(x) = x^(5/6)*K_{5/6}(x)
using a series for the estimation of K_{5/6}, taken from Rod Conan's thesis:
K_a(x)=1/2 \sum_{n=0}^\infty \frac{(-1)^n}{n!}
\left(\Gamma(-n-a) (x/2)^{2n+a} + \Gamma(-n+a) (x/2)^{2n-a} \right) ,
with a = 5/6.
Setting x22 = (x/2)^2, setting uda = (1/2)^a, and multiplying by x^a,
this becomes :
x^a * Ka(x) = 0.5 \sum_{n=0}^\infty (-1)^n / n! [ G(-n-a).uda x22^(n+a) + G(-n+a)/uda x22^n ]
Then we use the following recurrence formulae on the following quantities :
G(-(n+1)-a) = G(-n-a) / -a-n-1
G(-(n+1)+a) = G(-n+a) / a-n-1
(n+1)! = n! * (n+1)
x22^(n+1) = x22^n * x22
and at each iteration on n, one will use the values already computed at step (n-1).
The values of G(a) and G(-a) are hardcoded instead of being computed.
The first term of the series has also been skipped, as it
vanishes with another term in the expression of Dphi.
SEE ALSO:
*/
{
const double a = 5. / 6.;
const double x2a = pow(x, 2. * a), x22 = x * x / 4.;
double x2n; // x^2.a, etc
double s = 0.0;
int n;
const double Ga[11] = { 0, 12.067619015983075, 5.17183672113560444,
0.795667187867016068, 0.0628158306210802181, 0.00301515986981185091,
9.72632216068338833e-05, 2.25320204494595251e-06, 3.93000356676612095e-08,
5.34694362825451923e-10, 5.83302941264329804e-12 };
const double Gma[11] = { -3.74878707653729304, -2.04479295083852408,
-0.360845814853857083, -0.0313778969438136685, -0.001622994669507603,
-5.56455315259749673e-05, -1.35720808599938951e-06,
-2.47515152461894642e-08, -3.50257291219662472e-10,
-3.95770950530691961e-12, -3.65327031259100284e-14 };
x2n = 0.5; // init (1/2) * x^0
s = Gma[0] * x2a;
s *= x2n;
// prepare recurrence iteration for next step
x2n *= x22; // x^n
#pragma unroll
for (n = 1; n <= 10; n++)
{
s += (Gma[n] * x2a + Ga[n]) * x2n;
// prepare recurrence iteration for next step
x2n *= x22; // x^n
}
return s;
}
//------------------------------------------------------------------------------------
__device__ double asymp_macdo_gpu(double x)
/* DOCUMENT asymp_macdo_gpu(x)
Computes a term involved in the computation of the phase struct
function with a finite outer scale according to the Von-Karman
model. The term involves the MacDonald function (modified bessel
function of second kind) K_{5/6}(x), and the algorithm uses the
asymptotic form for x ~ infinity.
Warnings :
- This function causes a floating point exception for x=0
and should not be used in this case.
- Works only for x>0.
SEE ALSO:
*/
{
// k2 is the value for
// gamma_R(5./6)*2^(-1./6)
const double k2 = 1.00563491799858928388289314170833;
const double k3 = 1.25331413731550012081; // sqrt(pi/2)
const double a1 = 0.22222222222222222222; // 2/9
const double a2 = -0.08641975308641974829; // -7/81
const double a3 = 0.08001828989483310284; // 175/2187
double res;
double x_1;
x_1 = 1. / x;
res = k2
- k3 * exp(-x) * pow(x, 1 / 3.)
* (1.0 + x_1 * (a1 + x_1 * (a2 + x_1 * a3)));
return res;
}
//------------------------------------------------------------------------------------
__device__ double rodconan_gpu(double r, double L0, int k)
/* DOCUMENT rodconan_gpu(r,L0,k=)
The phase structure function is computed from the expression
Dphi(r) = k1 * L0^(5./3) * (k2 - (2.pi.r/L0)^5/6 K_{5/6}(2.pi.r/L0))
For small r, the expression is computed from a development of
K_5/6 near 0. The value of k2 is not used, as this same value
appears in the series and cancels with k2.
For large r, the expression is taken from an asymptotic form.
SEE ALSO:
*/
{
const double pi = 3.1415926535897932384626433;
double res = 0;
// k1 is the value of :
// 2*gamma_R(11./6)*2^(-5./6)*pi^(-8./3)*(24*gamma_R(6./5)/5.)^(5./6);
const double k1 = 0.1716613621245709486;
const double dprf0 = (2 * pi / L0) * r;
// k2 is the value for gamma_R(5./6)*2^(-1./6),
// but is now unused
// k2 = 1.0056349179985892838;
// Xlim = 0.75*2*pi; // = 4.71239
if (dprf0 > 4.71239)
res = asymp_macdo_gpu(dprf0);
else
res = -macdo_x56_gpu(dprf0, k);
res *= k1 * pow(L0, 5. / 3);
return res;
}
__global__ void tabulateDPHI_gpu_kernel(double* tabDPHI_d, double* L0diff_d, long Nl0, long Ndphi, double convert)
{
const int tx = threadIdx.x;
const int ty = blockIdx.x;
const int tid = ty * blockDim.x + tx;
int l = tid / Ndphi;
int j = tid % Ndphi;
if(tid >= (Nl0*Ndphi) ) return;
tabDPHI_d[tid] = rodconan_gpu((double)j / convert, L0diff_d[l], 10);
//double* mytabDPHI = tabDPHI_d + (l * Ndphi);
//
//int j, k;
//#pragma unroll
//for(k = 0; k < (Ndphi/tabDPHI_thread_x); k++)
//{
// j = k * tabDPHI_thread_x + tx;
// mytabDPHI[j] = rodconan_gpu(rr_d[j], L0diff_d[l], 10);
//}
//
//k = (Ndphi/tabDPHI_thread_x);
//if(tx < (Ndphi%tabDPHI_thread_x) )
//{
// j = k * tabDPHI_thread_x + tx;
// mytabDPHI[j] = rodconan_gpu(rr_d[j], L0diff_d[l], 10);
//}
}
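// Illustrative note: each thread handles one (distinct-L0, sample) pair; e.g. with Ndphi = 1000,
// tid = 2345 maps to l = 2 (third distinct L0) and j = 345 (the 346th tabulated separation).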
//------------------------------------------------------------------------------------
double* tabulateDPHI_gpu(struct tomo_struct tomo, long Ndphi, long *indexL0, int* Nl0_, double convert)
//void tabulateDPHI_gpu(double* tabDPHI_d, double* rr_d,struct tomo_struct tomo, long Ndphi, long *indexL0_h)
/* DOCUMENT tabDPHI = tabulateDPHI(rr,tomo,Ndphi, indexL0)
<tomo> : structure with all the needed information
<Ndphi> : size of rr
<indexL0> : link between the index of the studied layer and the index of the precomputed one.
Computes the phase structure function for a separation rr(x,y).
The r0 is not taken into account : the final result of DPHI(x,y,L0)
has to be scaled with r0^-5/3, with r0 expressed in meters, to get
the right value.
Computes the phase structure for each different L0 and gives an array (indexL0) linking the index of layer l to the corresponding index in tabDPHI: for layer l, DPHI = DPHI(du, dv, indexL0[l], rr, tabDPHI, convert).
SEE ALSO: DPHI
*/
{
//Search the different L0 and build indexL0
const long Nlayer = tomo.Nlayer;
long i, j;
int cpt = 1;
double tmp[Nlayer];
hipError_t e;
tmp[0] = tomo.L0[0];
indexL0[0] = 0;
for (i = 1; i < Nlayer; i++)
{
j = 0;
const double l0 = tomo.L0[i];
while ((j < cpt) && (tmp[j] != l0)) {j++;}
indexL0[i] = j;
if (j == cpt)
{
tmp[j] = l0;
cpt++;
}
}
const int Nl0 = cpt;
double L0diff[Nl0];
double* L0diff_d;
// allocate space for L0
e = hipMalloc((void**)&L0diff_d, Nl0*sizeof(double));
process_error(e, "alloc gpu L0diff_d");
for (i = 0; i < Nl0; i++)
{
L0diff[i] = tmp[i];
}
// offload L0diff
e = hipMemcpy(L0diff_d, L0diff, Nl0*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "offload L0diff");
// precompute DPHI: only once per distinct L0
double* tabDPHI_d = arr2dAlloc_gpu(Nl0, Ndphi);
// gpu kernel goes here
//for (l = 0; l < Nl0; l++)
//{
// #ifdef USE_OPENMP
// #pragma omp parallel num_threads(tomo.ncpu)
// #pragma omp for nowait
// #endif
// for (j = 0; j < Ndphi; j++)
// {
// tabDPHI[l][j] = rodconan_gpu(rr[j], L0diff[l], 10);
// }
//}
// Assume one thread per element
int nblocks = (Ndphi*Nl0)/tabDPHI_thread_x + ( ((Ndphi*Nl0)%tabDPHI_thread_x) != 0);
dim3 dimBlock(tabDPHI_thread_x, 1);
dim3 dimGrid(nblocks, 1);
hipLaunchKernelGGL(( tabulateDPHI_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, tabDPHI_d, L0diff_d, Nl0, Ndphi, convert);
if(L0diff_d)hipFree(L0diff_d);
*Nl0_ = Nl0;
return tabDPHI_d;
}
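/*
Worked example (illustrative): with tomo.L0 = {25, 50, 25, 100} the loop above finds Nl0 = 3
distinct values, L0diff = {25, 50, 100}, and indexL0 = {0, 1, 0, 2}, so layers 0 and 2 share
the same precomputed row of tabDPHI.
*/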
//------------------------------------------------------------------------------------
__device__ double DPHI_gpu(double x, double y, long indexL0, double *tabDPHI, double convert, int Ndphi)
/* DOCUMENT dphi = DPHI(x,y,indexL0,rr,tabDPHI,convert) * r0^(-5./3)
<x> & <y> : separation between apertures
<indexL0> : index for the L0 taken into account
<rr> : array of distance between apertures
<tabDPHI> : array of precomputed DPHI
<convert> : relation between the index on tabDPHI and (x,y)
Computes the phase structure function for a separation (x,y).
The r0 is not taken into account : the final result of DPHI(x,y,L0)
has to be scaled with r0^-5/3, with r0 expressed in meters, to get
the right value.
SEE ALSO:
*/
{
double r = sqrt(x * x + y * y);
long i0 = (long) (r * convert);
long i1 = i0 + 1;
return ((r - (double)i0 / convert) * tabDPHI[indexL0 * Ndphi + i1]
+ ((double)i1 / convert - r) * tabDPHI[indexL0 * Ndphi + i0]);
}
//------------------------------------------------------------------------------------
__device__ double cov_XX(double du, double dv, double ac, double ad, double bc, double bd, double *tabDPHI, long indexL0, double convert, int Ndphi)
/* DOCUMENT
Compute the XX-covariance with the distance sqrt(du2+dv2). DPHI is precomputed on tabDPHI.
*/
{
return -DPHI_gpu(du + ac, dv, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du + ad, dv, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du + bc, dv, indexL0, tabDPHI, convert, Ndphi)
- DPHI_gpu(du + bd, dv, indexL0, tabDPHI, convert, Ndphi);
}
//------------------------------------------------------------------------------------
__device__ double cov_YY(double du, double dv, double ac, double ad, double bc, double bd, double *tabDPHI, long indexL0, double convert, int Ndphi)
/* DOCUMENT
Compute the YY-covariance with the distance sqrt(du2+dv2). DPHI is precomputed on tabDPHI.
*/
{
return -DPHI_gpu(du, dv + ac, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du, dv + ad, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du, dv + bc, indexL0, tabDPHI, convert, Ndphi)
- DPHI_gpu(du, dv + bd, indexL0, tabDPHI, convert, Ndphi);
}
//------------------------------------------------------------------------------------
__device__ double cov_XY(double du, double dv, double s0, double *tabDPHI, long indexL0, double convert, int Ndphi)
/* DOCUMENT
Compute the XY-covariance with the distance sqrt(du2+dv2). DPHI is precomputed on tabDPHI.
*/
{
return -DPHI_gpu(du + s0, dv - s0, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du + s0, dv + s0, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du - s0, dv - s0, indexL0, tabDPHI, convert, Ndphi)
- DPHI_gpu(du - s0, dv + s0, indexL0, tabDPHI, convert, Ndphi);
}
//------------------------------------------------------------------------------------
__global__ void subposition_gpu_kernel(long Nw, long Nsubap, long Nlayer, double *alphaX, double *alphaY,
double *h, double *GsAlt, long *Nssp, double *diamPup, double *thetaML,
long *ioff, double *X, double *Y, double *XPup, double *YPup,
double *u, double *v)
{
const int tx = threadIdx.x;
const int ty = blockIdx.x;
const int tid = ty * blockDim.x + tx;
long i;
long n;
long l;
const double rad = 3.14159265358979323846 / 180.;
if(tid >= (Nw * Nsubap * Nlayer) ) return;
l = tid / (Nw * Nsubap);
const int pos = tid - l * (Nsubap * Nw);
i = pos / Nw;
n = pos - i * Nw;
//tid = n + i * Nw + l * Nw * Nsubap
const double dX = alphaX[n] * h[l];
const double dY = alphaY[n] * h[l];
const double rr = 1. - h[l] * GsAlt[n];
const long nssp = Nssp[n];
//magnification factor
const double G = diamPup[n] / (double) (nssp);
//rotation angle
const double th = thetaML[n] * rad;
//taking magnification factor into account
const double xtp = X[ioff[n] + i] * G;
const double ytp = Y[ioff[n] + i] * G;
//taking rotation into account
double uu = xtp * cos(th) - ytp * sin(th);
double vv = xtp * sin(th) + ytp * cos(th);
//taking pupil offset into account
uu += XPup[n];
vv += YPup[n];
//Projection onto the layer
u[tid] = uu * rr + dX;
v[tid] = vv * rr + dY;
}
//------------------------------------------------------------------------------------
//extern "C"
void subap_position_gpu(struct tomo_struct tomo, double *u_d, double *v_d)
//void subap_position_gpu(struct tomo_struct tomo, double ***u, double ***v)
/* DOCUMENT DOCUMENT subap_position(tomo, u, v)
<tomo> : structure with all the needed information.
<u> and <v> : 3d arrays containing the sub-apertures projected coordinates onto all the layers. u[0][2][1] is the X-coordinate of the subap 2 of the WFS 0 on the layer 1.
Computes the projected coordinates of all subapertures projected onto all the layer
*/
{
hipError_t e;
long ioff[tomo.Nw];
ioff[0] = 0;
for (int i=1;i<tomo.Nw;i++) ioff[i] = ioff[i-1] + tomo.Nsubap[i-1];
long* ioff_d;
e = hipMalloc((void**)&ioff_d, tomo.Nw*sizeof(long));
process_error(e, "alloc gpu ioff_d");
e = hipMemcpy(ioff_d, ioff, tomo.Nw*sizeof(long), hipMemcpyHostToDevice);
process_error(e, "copy gpu ioff_d");
double *alphaX_d;
e = hipMalloc((void**)&alphaX_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu alphaX_d");
e = hipMemcpy(alphaX_d, tomo.alphaX, tomo.Nw*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu alphaX_d");
double *alphaY_d;
e = hipMalloc((void**)&alphaY_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu alphaY_d");
e = hipMemcpy(alphaY_d, tomo.alphaY, tomo.Nw*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu alphaY_d");
double *h_d;
e = hipMalloc((void**)&h_d, tomo.Nlayer*sizeof(double));
process_error(e, "alloc gpu h_d");
e = hipMemcpy(h_d, tomo.h, tomo.Nlayer*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu h_d");
double *GsAlt_d;
e = hipMalloc((void**)&GsAlt_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu GsAlt_d");
e = hipMemcpy(GsAlt_d, tomo.GsAlt, tomo.Nw*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu GsAlt_d");
long *Nssp_d;
e = hipMalloc((void**)&Nssp_d, tomo.Nw*sizeof(long));
process_error(e, "alloc gpu Nssp_d");
e = hipMemcpy(Nssp_d, tomo.Nssp, tomo.Nw*sizeof(long), hipMemcpyHostToDevice);
process_error(e, "copy gpu Nssp_d");
double *diamPup_d;
e = hipMalloc((void**)&diamPup_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu diamPup_d");
e = hipMemcpy(diamPup_d, tomo.diamPup, tomo.Nw*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu diamPup_d");
double *thetaML_d;
e = hipMalloc((void**)&thetaML_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu thetaML_d");
e = hipMemcpy(thetaML_d, tomo.thetaML, tomo.Nw*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu thetaML_d");
double *X_d;
e = hipMalloc((void**)&X_d, tomo.Nx*sizeof(double));
process_error(e, "alloc gpu X_d");
e = hipMemcpy(X_d, tomo.X, tomo.Nx*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu X_d");
double *Y_d;
e = hipMalloc((void**)&Y_d, tomo.Nx*sizeof(double));
process_error(e, "alloc gpu Y_d");
e = hipMemcpy(Y_d, tomo.Y, tomo.Nx*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu Y_d");
double *XPup_d;
e = hipMalloc((void**)&XPup_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu XPup_d");
e = hipMemcpy(XPup_d, tomo.XPup, tomo.Nw*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu XPup_d");
double *YPup_d;
e = hipMalloc((void**)&YPup_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu YPup_d");
e = hipMemcpy(YPup_d, tomo.YPup, tomo.Nw*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu YPup_d");
int msize = tomo.Nlayer * tomo.Nw * tomo.Nsubap[0];
int nblocks = msize / tabDPHI_thread_x + ( ( msize % tabDPHI_thread_x) != 0);
dim3 dimBlock(tabDPHI_thread_x, 1);
dim3 dimGrid(nblocks, 1);
hipLaunchKernelGGL(( subposition_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, tomo.Nw, tomo.Nsubap[0], tomo.Nlayer, alphaX_d, alphaY_d,
h_d, GsAlt_d, Nssp_d, diamPup_d, thetaML_d, ioff_d, X_d,
Y_d, XPup_d, YPup_d, u_d, v_d);
if (ioff_d) hipFree(ioff_d);
if (alphaX_d) hipFree(alphaX_d);
if (alphaY_d) hipFree(alphaY_d);
if (h_d) hipFree(h_d);
if (GsAlt_d) hipFree(GsAlt_d);
if (Nssp_d) hipFree(Nssp_d);
if (diamPup_d) hipFree(diamPup_d);
if (thetaML_d) hipFree(thetaML_d);
if (X_d) hipFree(X_d);
if (Y_d) hipFree(Y_d);
if (XPup_d) hipFree(XPup_d);
if (YPup_d) hipFree(YPup_d);
}
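/*
Illustrative note on the layout of u and v: the kernel above stores the projected coordinate of
subaperture i of WFS n on layer l at index n + i*Nw + l*Nw*Nsubap; e.g. with Nw = 3 and
Nsubap = 100, WFS 1 / subap 4 / layer 2 lands at 1 + 12 + 600 = 613.
*/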
//============================================================================================
//============================= GENERATION KERNEL ============================================
//============================================================================================
__device__ double compute_element(int ipos, int jpos, int *tab_wfs, int* tab_subap, int* tab_xy, double convert,
double *sspSizeL, long *Nssp, double *u, double *v, double pasDPHI,double *tabDPHI,
long *indexL0, double *cn2, int Ndphi, int Nw, int Nlayer, int Nsubap,
int type_mat, double teldiam)
{
/* *** Covariance matrix per-element generation ***
* Arguments
* =========
* ipos: Integer: global x-coordinate of the element w.r.t. the entire matrix
* jpos: Integer: global y-coordinate of the element w.r.t. the entire matrix
*/
// for now return a dummy value
const double lambda2 = 0.00026942094446267851;
//WFS m
int m = tab_wfs[ipos];
if (type_mat == 3) m = Nw-1;
//WFS n
int n = tab_wfs[jpos];
if (type_mat == 2) n = Nw-1;
//subap i
int i = tab_subap[ipos];
//subap j
int j = tab_subap[jpos];
//xy i
int xy_i = tab_xy[ipos];
//xy j
int xy_j = tab_xy[jpos];
const double sspSizem = teldiam / Nssp[m];
const double sspSizen = teldiam / Nssp[n];
const double kk = lambda2 / (sspSizem * sspSizen);
int type = xy_i * 2 + xy_j;
//Layer l
double covar = 0.0;
#pragma unroll
for (int l = 0; l < Nlayer; l++)
{
const double sspSizeml = sspSizeL[m * Nlayer + l];
const double sspSizenl = sspSizeL[n * Nlayer + l];
//test that the layer altitude is not higher than the LGS altitude
if ((sspSizeml > 0) && (sspSizenl > 0))
{
const int pos1 = m + i * Nw + l * Nw * Nsubap;
const int pos2 = n + j * Nw + l * Nw * Nsubap;
const double du = u[pos1] - u[pos2];
const double dv = v[pos1] - v[pos2];
const double s1 = sspSizeml * 0.5;
const double s2 = sspSizenl * 0.5;
const double ac = s1 - s2;
const double ad = s1 + s2;
const double bc = -ad; // initially -s1-s2;
const double bd = -ac; // initially -s1+s2;
if (type == 0) covar += 0.5 * pasDPHI * cov_XX(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l];
else if (type == 3) covar += 0.5 * pasDPHI * cov_YY(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l];
else //if ((type == 1) || (type == 2))
{
const double s0 = sqrt(s1 * s1 + s2 * s2); //half size of the subaperture equivalent to a convolution by s1 and s2
const double dd = (s1 > s2) ? 1. - s2 / s1 : 1. - s1 / s2; // Nono's style ....
covar += 0.25 * pasDPHI * cov_XY(du,dv,s0,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l] * (1. - dd * dd);
}
}
}
return (double)covar;
}
__device__ double compute_element_ts(int ipos, int jpos, double convert, double *X, double *Y,
long *Nssp, double pasDPHI, double *tabDPHI, long *indexL0, double *cn2,
int Ndphi, int Nw, int Nlayer, int Nsubap, double teldiam)
{
/* *** Covariance matrix per-element generation ***
* Arguments
* =========
* ipos: Integer: global x-coordinate of the element w.r.t. the entire matrix
* jpos: Integer: global y-coordinate of the element w.r.t. the entire matrix
*/
// for now return a dummy value
const double lambda2 = 0.00026942094446267851;
//WFS Nw-1
//subap i
int i = ipos < Nsubap ? ipos : ipos - Nsubap;
//subap j
int j = jpos < Nsubap ? jpos : jpos - Nsubap;
//xy i
int xy_i = ipos < Nsubap ? 0 : 1;
//xy j
int xy_j = jpos < Nsubap ? 0 : 1;
const double sspSize = teldiam / Nssp[Nw-1];
const double kk = lambda2 / (sspSize * sspSize);
int type = xy_i * 2 + xy_j;
const double s = sspSize * 0.5;
const double ac = 0.0;
const double ad = 2.0 * s;
const double bc = -ad;
const double bd = 0.0;
const double du = X[Nsubap*(Nw-1)+i] - X[Nsubap*(Nw-1)+j];
const double dv = Y[Nsubap*(Nw-1)+i] - Y[Nsubap*(Nw-1)+j];
//const double du = X[Nw-1 + i * Nw] - X[Nw-1 + j * Nw];
//const double dv = Y[Nw-1 + i * Nw] - Y[Nw-1 + j * Nw];
//Layer l
double covar = 0.0;
#pragma unroll
for (int l = 0; l < Nlayer; l++)
{
//test that the layer altitude is not higher than the LGS altitude
if (sspSize > 0)
{
if (type == 0) covar += 0.5 * pasDPHI * cov_XX(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) *
kk * cn2[l];
else if (type == 3) covar += 0.5 * pasDPHI * cov_YY(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) *
kk * cn2[l];
else
{
const double s0 = 1.41421*s; //half size of the subaperture equivalent to a convolution by s1 and s2
const double dd = 0;
covar += 0.25 * pasDPHI * cov_XY(du,dv,s0,tabDPHI,indexL0[l],convert,Ndphi) *
kk * cn2[l] * (1. - dd * dd);
}
}
}
return (double)covar;
}
__global__ void matcov_gpu_kernel(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda,
int *tab_wfs, int* tab_subap, int* tab_xy, double convert, double *sspSizeL,
long *Nssp, double *u, double *v, double pasDPHI,double *tabDPHI, long *indexL0,
double *cn2, int Ndphi, int Nw, int Nlayer, int Nsubap, int type_mat, double teldiam)
{
/* *** covariance matrix generation kernel ***
* The kernel generates the element values in a given matrix/submatrix
* The generation function can be any function, as long as each element
* can be computed both individually and independently
*
* see argument description in the kernel driver
*/
// local thread coordinates w.r.t. thread block
const int tx_ = threadIdx.x;
const int ty_ = threadIdx.y;
// local thread block coordinates w.r.t. kernel grid
const int bx_ = blockIdx.x;
const int by_ = blockIdx.y;
// local coordinates of the element w.r.t. submatrix
int lx = bx_ * blockDim.x + tx_;
int ly = by_ * blockDim.y + ty_;
// global coordinates of the element w.r.t. the entire matrix
int gx = lx + xoffset;
int gy = ly + yoffset;
// out-of-bound threads should terminate
if( (lx >= nrows) || (ly >= ncols) ) return;
// Advance the data pointer accordingly
data += ly * lda + lx;
if ((type_mat == 3) || (gx <= gy)) {
// call the generation function
data[0] = compute_element(gx, gy, tab_wfs, tab_subap, tab_xy,convert,sspSizeL,Nssp,u,v,pasDPHI,tabDPHI,
indexL0,cn2,Ndphi,Nw,Nlayer,Nsubap,type_mat,teldiam);
//printf("gx = %d, gy = %d ----- %.2f \n", gx, gy, data[0]);
}
}
__global__ void matts_gpu_kernel(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda,
double convert, double *X, double *Y, long *Nssp, double pasDPHI,double *tabDPHI,
long *indexL0, double *cn2, int Ndphi, int Nw, int Nlayer, int Nsubap, double teldiam)
{
/* *** covariance matrix generation kernel ***
* The kernel generates the element values in a given matrix/submatrix
* The generation function can be any function, as long as each element
* can be computed both individually and independently
*
* see argument description in the kernel driver
*/
// local thread coordinates w.r.t. thread block
const int tx_ = threadIdx.x;
const int ty_ = threadIdx.y;
// local thread block coordinates w.r.t. kernel grid
const int bx_ = blockIdx.x;
const int by_ = blockIdx.y;
// local coordinates of the element w.r.t. submatrix
int lx = bx_ * blockDim.x + tx_;
int ly = by_ * blockDim.y + ty_;
// global coordinates of the element w.r.t. the entire matrix
int gx = lx + xoffset;
int gy = ly + yoffset;
// out-of-bound threads should terminate
if( (lx >= nrows) || (ly >= ncols) ) return;
// Advance the data pointer accordingly
data += ly * lda + lx;
// call the generation function
data[0] = compute_element_ts(gx, gy, convert,X, Y,Nssp,pasDPHI,tabDPHI,
indexL0,cn2,Ndphi,Nw,Nlayer,Nsubap,teldiam);
//printf("gx = %d, gy = %d ----- %.2f \n", gx, gy, data[0]);
}
__global__ void matcov_gpu_kernel_copy(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda)
{
/* *** covariance matrix generation kernel ***
* The kernel generates the element values in a given matrix/submatrix
* The generation function can be any function, as long as each element
* can be computed both individually and independently
*
* see argument description in the kernel driver
*/
// local thread coordinates w.r.t. thread block
const int tx_ = threadIdx.x;
const int ty_ = threadIdx.y;
// local thread block coordinates w.r.t. kernel grid
const int bx_ = blockIdx.x;
const int by_ = blockIdx.y;
// local coordinates of the element w.r.t. submatrix
int lx = bx_ * blockDim.x + tx_;
int ly = by_ * blockDim.y + ty_;
// global coordinates of the element w.r.t. the entire matrix
int gx = lx + xoffset;
int gy = ly + yoffset;
// out-of-bound threads should terminate
if( (lx >= nrows) || (ly >= ncols) ) return;
// Advance the data pointer accordingly
//data += ly * lda + lx;
if (gx > gy) {
// call the generation function
data[ly * lda + lx] = data[ly + lx * lda];
//printf("gx = %d, gy = %d ----- %.2f \n", gx, gy, data[0]);
}
}
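// Illustrative note: matcov_gpu_kernel above fills only the elements with gx <= gy (except for
// type_mat == 3, where every element is generated), and this copy kernel mirrors them across the
// diagonal, so the two launches issued by matcov_gpu() together yield the full symmetric matrix.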
//extern "C"
void matcov_gpu(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda, struct tomo_struct tomo)
{
/* *** matcov gpu kernel driver ***
* Arguments
* ==========
* data double pointer: A pointer to the matrix/submatrix to be generated. It
* should always point to the first element in a matrix/submatrix
*
* nrows integer: The number of rows of the matrix/submatrix to be generated
*
* ncols integer: The number of columns of the matrix/submatrix to be generated
*
* xoffset integer: The x-offset of the submatrix, must be zero if the entire matrix
* is generated. Its the x-coordinate of the first element in the matrix/submatrix
*
* yoffset integer: The y-offset of the submatrix, must be zero if the entire matrix
* is generated. Its the y-coordinate of the first element in the matrix/submatrix
*
* lda integer: The leading dimension of the matrix/submatrix
*/
hipError_t e;
// %%%%%%% Pre-computation of DPHI %%%%%%%%%%
//Computes an array of DPHI (tabDPHI) for an array of subaperture distance rr for each DIFFERENT L0
const long Nw = tomo.Nw;
const long Nlayer = tomo.Nlayer;
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr sampling step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
long indexL0[Nlayer]; //link between index in L0 and index in L0diff
double *tabDPHI_d;
int Nl0_; // used to know the size of the array
tabDPHI_d = tabulateDPHI_gpu(tomo, Ndphi, indexL0, (int*)&Nl0_,convert);
long *indexL0_d;
//printf("sizeof indexL0 is %.2f KB\n", Nlayer*sizeof(long)/1024.0);
e = hipMalloc((void**)&indexL0_d, Nlayer*sizeof(long));
process_error(e, "alloc gpu indexL0_d");
e = hipMemcpy(indexL0_d, indexL0, Nlayer*sizeof(long), hipMemcpyHostToDevice);
process_error(e, "copy gpu indexL0_d");
// %%%%%%% Computation of the sub-apertures positions and sizes %%%%%%%%%%%
// u, v :arrays containing all the sub-apertures coordinates of all WFS, one after the other
// u[0][1][3] is the X-coordinate of subap number 3 of wfs number 0 at altitude 3
double* u_d;
//printf("sizeof u is %.2f KB\n", Nlayer*tomo.Nsubap[0]*Nw*sizeof(double)/1024.0);
e = hipMalloc((void**)&u_d, Nlayer*tomo.Nsubap[0]*Nw*sizeof(double));
process_error(e, "alloc gpu u_d");
double* v_d;
e = hipMalloc((void**)&v_d, Nlayer*tomo.Nsubap[0]*Nw*sizeof(double));
process_error(e, "alloc gpu v_d");
//Computes u and v
subap_position_gpu(tomo, u_d, v_d);
double *sspSizeL = (double *)malloc(sizeof(double)*Nw*Nlayer);
for (int cc = 0; cc < Nw * Nlayer; cc++) {
int n = cc / Nlayer;
int l = cc - n * Nlayer;
sspSizeL[cc] = tomo.sspSize[n] * (1. - tomo.GsAlt[n] * tomo.h[l]);
}
double *sspSizeL_d;
//printf("sizeof sspSizeL is %.2f KB\n", Nw*Nlayer*sizeof(double)/1024.0);
e = hipMalloc((void**)&sspSizeL_d, Nw*Nlayer*sizeof(double));
process_error(e, "alloc gpu sspSizeL_d");
e = hipMemcpy(sspSizeL_d, sspSizeL, Nw*Nlayer*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu sspSizeL_d");
int *tab_wfs;
tab_wfs = (int*)malloc(nrows*sizeof(int));
int *tab_subap;
tab_subap = (int*)malloc(nrows*sizeof(int));
int *tab_xy;
tab_xy = (int*)malloc(nrows*sizeof(int));
long ts = Nw - 1;//Truth sensor : ts
int cpt = 0;
for (int cc=0;cc<Nw;cc++) {
if (cc != ts) {
int nslps = tomo.Nsubap[cc]*2;
for (int ccc=0;ccc<nslps;ccc++) {
if (cc > ts) tab_wfs[ccc+cpt] = cc - 1;
else tab_wfs[ccc+cpt] = cc;
if (ccc < nslps/2) {
tab_subap[ccc+cpt] = ccc;
tab_xy[ccc+cpt] = 0;
} else {
tab_subap[ccc+cpt] = ccc - nslps/2;
tab_xy[ccc+cpt] = 1;
}
}
cpt += nslps;
}
}
//for(int ah = 0; ah < nrows; ah++)
// printf("[%5d]: tab_wfs = %6d, tab_subap = %6d, tab_xy = %6d\n", ah, tab_wfs[ah], tab_subap[ah], tab_xy[ah]);
int *tab_wfs_d;
//printf("sizeof tab_wfs is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = hipMalloc((void**)&tab_wfs_d, nrows*sizeof(int));
process_error(e, "alloc gpu tab_wfs_d");
e = hipMemcpy(tab_wfs_d, tab_wfs, nrows*sizeof(int), hipMemcpyHostToDevice);
process_error(e, "copy gpu tab_wfs_d");
int *tab_subap_d;
//printf("sizeof tab_subap is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = hipMalloc((void**)&tab_subap_d, nrows*sizeof(int));
process_error(e, "alloc gpu tab_subap_d");
e = hipMemcpy(tab_subap_d, tab_subap, nrows*sizeof(int), hipMemcpyHostToDevice);
process_error(e, "copy gpu tab_subap_d");
int *tab_xy_d;
//printf("sizeof tab_xy is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = hipMalloc((void**)&tab_xy_d, nrows*sizeof(int));
process_error(e, "alloc gpu tab_xy_d");
e = hipMemcpy(tab_xy_d, tab_xy, nrows*sizeof(int), hipMemcpyHostToDevice);
process_error(e, "copy gpu tab_xy_d");
double *cn2_d;
//printf("sizeof cn2_d is %.2f KB\n", Nlayer*sizeof(double)/1024.0);
e = hipMalloc((void**)&cn2_d, Nlayer*sizeof(double));
process_error(e, "alloc gpu cn2_d");
e = hipMemcpy(cn2_d, tomo.cn2, Nlayer*sizeof(double), hipMemcpyHostToDevice);
process_error(e, "copy gpu cn2_d");
long *Nssp_d;
e = hipMalloc((void**)&Nssp_d, Nw*sizeof(long));
process_error(e, "alloc gpu Nssp_d");
e = hipMemcpy(Nssp_d, tomo.Nssp, Nw*sizeof(long), hipMemcpyHostToDevice);
process_error(e, "copy gpu Nssp_d");
int nbx = nrows / matcov_thread_x + (nrows%matcov_thread_x != 0);
int nby = ncols / matcov_thread_y + (ncols%matcov_thread_y != 0);
dim3 dimBlock(matcov_thread_x, matcov_thread_y);
dim3 dimGrid(nbx, nby);
const long Nsubap = tomo.Nsubap[0];
int type_mat = tomo.part;
//printf("Nlayer = %d \n", Nlayer);
hipLaunchKernelGGL(( matcov_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, data, nrows, ncols, xoffset, yoffset, lda, tab_wfs_d, tab_subap_d, tab_xy_d,
convert,sspSizeL_d,Nssp_d,u_d,v_d,pasDPHI,tabDPHI_d,indexL0_d,cn2_d,
Ndphi,Nw,Nlayer,Nsubap,type_mat,tomo.DiamTel);
hipLaunchKernelGGL(( matcov_gpu_kernel_copy), dim3(dimGrid), dim3(dimBlock), 0, 0, data, nrows, ncols, xoffset, yoffset, lda);
if (sspSizeL) free(sspSizeL);
if (tab_wfs) free(tab_wfs);
if (tab_subap) free(tab_subap);
if (tab_xy) free(tab_xy);
if (sspSizeL_d) hipFree(sspSizeL_d);
if (tab_wfs_d) hipFree(tab_wfs_d);
if (tab_subap_d) hipFree(tab_subap_d);
if (tab_xy_d) hipFree(tab_xy_d);
if (indexL0_d) hipFree(indexL0_d);
if (tabDPHI_d) hipFree(tabDPHI_d);
if (cn2_d) hipFree(cn2_d);
if (Nssp_d) hipFree(Nssp_d);
if (u_d) hipFree(u_d);
if (v_d) hipFree(v_d);
}
//======================================================================================================
// V3
//======================================================================================================
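// V3: all device buffers live in a persistent tomo_gpu_struct together with a
// dedicated stream, so the covariance matrix can be regenerated repeatedly without
// re-allocating. A minimal sketch of the intended call order, inferred from the
// definitions below (data_d and n are illustrative names, not from the original code):
//
//   struct tomo_gpu_struct tgpu;
//   init_tomo_gpu(&tgpu, tomo);              // allocate device buffers + stream
//   update_tomo_sys(&tgpu, tomo);            // upload static system geometry
//   update_tomo_atm(&tgpu, tomo);            // upload atmosphere, tabulate DPHI, project subaps
//   matcov_gpu3(data_d, n, n, 0, 0, n, tomo, &tgpu);  // fill an n x n covariance block on the GPU
//   free_tomo_gpu(&tgpu);                    // release buffers and stream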
void init_tomo_gpu(struct tomo_gpu_struct *tomo_gpu, struct tomo_struct tomo){
hipError_t e;
e = hipMalloc((void**)&(tomo_gpu->indexL0_d), tomo.Nlayer*sizeof(long));
process_error(e, "alloc gpu indexL0_d");
e = hipMalloc((void**)&(tomo_gpu->u_d), tomo.Nlayer*tomo.Nsubap[0]*tomo.Nw*sizeof(double));
process_error(e, "alloc gpu u_d");
e = hipMalloc((void**)&(tomo_gpu->v_d), tomo.Nlayer*tomo.Nsubap[0]*tomo.Nw*sizeof(double));
process_error(e, "alloc gpu v_d");
e = hipMalloc((void**)&(tomo_gpu->sspSizeL_d), tomo.Nw*tomo.Nlayer*sizeof(double));
process_error(e, "alloc gpu sspSizeL_d");
e = hipMalloc((void**)&(tomo_gpu->cn2_d), tomo.Nw*tomo.Nlayer*sizeof(double));
process_error(e, "alloc gpu cn2_d");
e = hipMalloc((void**)&(tomo_gpu->h_d), tomo.Nlayer*sizeof(double));
process_error(e, "alloc gpu h_d");
e = hipMalloc((void**)&(tomo_gpu->Nssp_d), tomo.Nw*sizeof(long));
process_error(e, "alloc gpu Nssp_d");
e = hipMalloc((void**)&(tomo_gpu->ioff_d), tomo.Nw*sizeof(long));
process_error(e, "alloc gpu ioff_d");
e = hipMalloc((void**)&(tomo_gpu->alphaX_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu alphaX_d");
e = hipMalloc((void**)&(tomo_gpu->alphaY_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu alphaY_d");
e = hipMalloc((void**)&(tomo_gpu->GsAlt_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu GsAlt_d");
e = hipMalloc((void**)&(tomo_gpu->diamPup_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu diamPup_d");
e = hipMalloc((void**)&(tomo_gpu->thetaML_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu thetaML_d");
e = hipMalloc((void**)&(tomo_gpu->X_d), tomo.Nx*sizeof(double));
process_error(e, "alloc gpu X_d");
e = hipMalloc((void**)&(tomo_gpu->Y_d), tomo.Nx*sizeof(double));
process_error(e, "alloc gpu Y_d");
e = hipMalloc((void**)&(tomo_gpu->XPup_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu XPup_d");
e = hipMalloc((void**)&(tomo_gpu->YPup_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu YPup_d");
//printf("dims : %d %d %d\n",tomo.Nsubap[tomo.Nw-1],tomo.Nsubap[0],tomo.Nx);
/*
e = hipMalloc((void**)&(tomo_gpu->Cmm_d), tomo.Nw*tomo.Nsubap[0]*2*tomo.Nw*tomo.Nsubap[0]*2*sizeof(double));
process_error(e, "alloc gpu YPup_d");
e = hipMalloc((void**)&(tomo_gpu->Cpm_d), tomo.Nsubap[0]*2*tomo.Nw*tomo.Nsubap[0]*2*sizeof(double));
process_error(e, "alloc gpu YPup_d");
e = hipMalloc((void**)&(tomo_gpu->R_d), tomo.Nsubap[0]*2*tomo.Nw*tomo.Nsubap[0]*2*sizeof(double));
process_error(e, "alloc gpu YPup_d");
*/
tomo_gpu->L0diff_d = NULL;
tomo_gpu->tabDPHI_d = NULL;
e = hipStreamCreate(&(tomo_gpu->matcov_stream));
process_error(e, "create matcov stream");
}
void free_tomo_gpu(struct tomo_gpu_struct *tomo_gpu){
hipError_t e;
if (tomo_gpu->u_d) e = hipFree(tomo_gpu->u_d);
process_error(e, "free gpu u_d");
if (tomo_gpu->v_d) e = hipFree(tomo_gpu->v_d);
process_error(e, "free gpu v_d");
if (tomo_gpu->sspSizeL_d) e = hipFree(tomo_gpu->sspSizeL_d) ;
process_error(e, "free gpu sspSizeL_d");
if (tomo_gpu->cn2_d) e = hipFree(tomo_gpu->cn2_d);
process_error(e, "free gpu cn2_d");
if (tomo_gpu->h_d) e = hipFree(tomo_gpu->h_d);
process_error(e, "free gpu h_d");
if (tomo_gpu->indexL0_d) e = hipFree(tomo_gpu->indexL0_d);
process_error(e, "free gpu indexL0_d");
if (tomo_gpu->Nssp_d) e = hipFree(tomo_gpu->Nssp_d);
process_error(e, "free gpu Nssp_d");
if (tomo_gpu->ioff_d) e = hipFree(tomo_gpu->ioff_d);
process_error(e, "free gpu ioff_d");
if (tomo_gpu->alphaX_d) e = hipFree(tomo_gpu->alphaX_d);
process_error(e, "free gpu alphaX_d");
if (tomo_gpu->alphaY_d) e = hipFree(tomo_gpu->alphaY_d);
process_error(e, "free gpu alphaY_d");
if (tomo_gpu->GsAlt_d) e = hipFree(tomo_gpu->GsAlt_d);
process_error(e, "free gpu GsAlt_d");
if (tomo_gpu->diamPup_d) e = hipFree(tomo_gpu->diamPup_d);
process_error(e, "free gpu diamPup_d");
if (tomo_gpu->thetaML_d) e = hipFree(tomo_gpu->thetaML_d);
process_error(e, "free gpu thetaML_d");
if (tomo_gpu->X_d) e = hipFree(tomo_gpu->X_d);
process_error(e, "free gpu X_d");
if (tomo_gpu->Y_d) e = hipFree(tomo_gpu->Y_d);
process_error(e, "free gpu Y_d");
if (tomo_gpu->XPup_d) e = hipFree(tomo_gpu->XPup_d);
process_error(e, "free gpu XPup_d");
if (tomo_gpu->YPup_d) e = hipFree(tomo_gpu->YPup_d);
process_error(e, "free gpu YPup_d");
/*
if (tomo_gpu->Cmm_d) e = hipFree(tomo_gpu->Cmm_d);
process_error(e, "free gpu YPup_d");
if (tomo_gpu->Cpm_d) e = hipFree(tomo_gpu->Cpm_d);
process_error(e, "free gpu YPup_d");
if (tomo_gpu->R_d) e = hipFree(tomo_gpu->R_d);
process_error(e, "free gpu YPup_d");
*/
if ((tomo_gpu->tabDPHI_d) != NULL) e = hipFree(tomo_gpu->tabDPHI_d);
process_error(e, "free gpu tabDPHI_d");
if ((tomo_gpu->L0diff_d) != NULL) e = hipFree(tomo_gpu->L0diff_d);
process_error(e, "free gpu L0diff_d");
// destroy matcov stream
e = hipStreamDestroy(tomo_gpu->matcov_stream);
process_error(e, "destroy matcov stream");
}
//------------------------------------------------------------------------------------
void tab_dphi_gpu(double *tab_dphi, struct tomo_struct tomo, struct tomo_gpu_struct *tomo_gpu, long Ndphi, double *L0diff_d, int Nl0, double convert)
//void tabulateDPHI_gpu(double* tabDPHI_d, double* rr_d,struct tomo_struct tomo, long Ndphi, long *indexL0_h)
/* DOCUMENT tabDPHI = tabulateDPHI(rr,tomo,Ndphi, indexL0)
<tomo> : structure with all the needed information
<Ndphi> : size of rr
<indexL0> : link between the index of the studied layer and the index of the precomputed one.
Computes the phase structure function for a separation rr(x,y).
The r0 is not taken into account : the final result of DPHI(x,y,L0)
has to be scaled with r0^-5/3, with r0 expressed in meters, to get
the right value.
Computes the phase structure function for each different L0 and gives an array (indexL0) linking the index of layer i to the index in tabDPHI: for layer l, DPHI = DPHI(du, dv, indexL0[l], rr, tabDPHI, convert).
SEE ALSO: DPHI
*/
{
// Assume one thread per element
int nblocks = (Ndphi*Nl0)/tabDPHI_thread_x + ( ((Ndphi*Nl0)%tabDPHI_thread_x) != 0);
dim3 dimBlock(tabDPHI_thread_x, 1);
dim3 dimGrid(nblocks, 1);
hipLaunchKernelGGL(( tabulateDPHI_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, tomo_gpu->matcov_stream, tab_dphi, L0diff_d, Nl0, Ndphi, convert);
CudaCheckError();
}
//------------------------------------------------------------------------------------
//extern "C"
void sub_pos_gpu(struct tomo_gpu_struct *tomo_gpu, struct tomo_struct tomo)
//void subap_position_gpu(struct tomo_struct tomo, double ***u, double ***v)
/* DOCUMENT DOCUMENT subap_position(tomo, u, v)
<tomo> : structure with all the needed information.
<u> and <v> : 3d arrays containing the sub-apertures projected coordinates onto all the layers. u[0][2][1] is the X-coordinate of the subap 2 of the WFS 0 on the layer 1.
Computes the coordinates of all sub-apertures projected onto all the layers
*/
{
int msize = tomo.Nlayer * tomo.Nw * tomo.Nsubap[0];
int nblocks = msize / tabDPHI_thread_x + ( ( msize % tabDPHI_thread_x) != 0);
dim3 dimBlock(tabDPHI_thread_x, 1);
dim3 dimGrid(nblocks, 1);
hipLaunchKernelGGL(( subposition_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, tomo_gpu->matcov_stream, tomo.Nw, tomo.Nsubap[0], tomo.Nlayer, tomo_gpu->alphaX_d,
tomo_gpu->alphaY_d,tomo_gpu->h_d, tomo_gpu->GsAlt_d,
tomo_gpu->Nssp_d, tomo_gpu->diamPup_d, tomo_gpu->thetaML_d,
tomo_gpu->ioff_d, tomo_gpu->X_d, tomo_gpu->Y_d,
tomo_gpu->XPup_d, tomo_gpu->YPup_d, tomo_gpu->u_d, tomo_gpu->v_d);
CudaCheckError();
}
void update_tomo_atm(struct tomo_gpu_struct *tomo_gpu,struct tomo_struct tomo) {
hipError_t e;
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
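// The DPHI table will hold Ndphi samples spanning separations from 0 to rmax
// (plus one step); convert maps a separation r to its table index r*convert,
// as used on the device side by DPHI_gpu.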
e = hipMemcpyAsync(tomo_gpu->h_d, tomo.h, tomo.Nlayer*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu h_d");
e = hipMemcpyAsync(tomo_gpu->cn2_d, tomo.cn2, tomo.Nlayer*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu cn2_d");
double *sspSizeL = (double *)malloc(sizeof(double)*tomo.Nw*tomo.Nlayer);
for (int cc = 0; cc < tomo.Nw * tomo.Nlayer; cc++) {
int n = cc / tomo.Nlayer;
int l = cc - n * tomo.Nlayer;
sspSizeL[cc] = tomo.sspSize[n] * (1. - tomo.GsAlt[n] * tomo.h[l]);
}
e = hipMemcpyAsync(tomo_gpu->sspSizeL_d, sspSizeL, tomo.Nw*tomo.Nlayer*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu sspSizeL_d");
//Search the different L0 and build indexL0
const long Nlayer = tomo.Nlayer;
long i, j;
int cpt = 1;
double tmp[Nlayer];
long indexL0[Nlayer];
tmp[0] = tomo.L0[0];
indexL0[0] = 0;
for (i = 1; i < Nlayer; i++) {
j = 0;
const double l0 = tomo.L0[i];
while ((j < cpt) && (tmp[j] != l0)) {j++;}
indexL0[i] = j;
if (j == cpt) {
tmp[j] = l0;
cpt++;
}
}
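// After this loop, tmp[0..cpt-1] holds the distinct outer-scale values L0 found
// among the layers, and indexL0[i] is the position of layer i's L0 in that list,
// so DPHI only has to be tabulated once per distinct L0 (Nl0 = cpt).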
e = hipMemcpyAsync((tomo_gpu->indexL0_d), indexL0, tomo.Nlayer*sizeof(long), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu indexL0_d");
tomo_gpu->Nl0 = cpt;
double L0diff[tomo_gpu->Nl0];
// allocate space for L0
if ((tomo_gpu->L0diff_d) != NULL){hipFree(tomo_gpu->L0diff_d);}
e = hipMalloc((void**)&(tomo_gpu->L0diff_d), tomo_gpu->Nl0*sizeof(double));
process_error(e, "alloc gpu L0diff_d");
for (i = 0; i < tomo_gpu->Nl0; i++) {
L0diff[i] = tmp[i];
}
// offload L0diff
e = hipMemcpyAsync(tomo_gpu->L0diff_d, L0diff, tomo_gpu->Nl0*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "offload L0diff");
// precomputation of DPHI: only once for each distinct L0
if ((tomo_gpu->tabDPHI_d) != NULL){hipFree(tomo_gpu->tabDPHI_d);}
e = hipMalloc((void**)&(tomo_gpu->tabDPHI_d), tomo_gpu->Nl0*Ndphi*sizeof(double));
process_error(e, "alloc gpu tabDPHI_d");
tab_dphi_gpu(tomo_gpu->tabDPHI_d, tomo, tomo_gpu, Ndphi, tomo_gpu->L0diff_d, tomo_gpu->Nl0,convert);
// %%%%%%% Computation of the sub-apertures positions and sizes %%%%%%%%%%%
// u, v :arrays containing all the sub-apertures coordinates of all WFS, one after the other
// u[0][1][3] is the X-coordinate of subap number 3 of wfs number 0 at altitude 3
//Computes u and v
sub_pos_gpu(tomo_gpu, tomo);
if (sspSizeL) free(sspSizeL);
hipStreamSynchronize(tomo_gpu->matcov_stream);
}
void update_tomo_sys(struct tomo_gpu_struct *tomo_gpu,struct tomo_struct tomo) {
hipError_t e;
long ioff[tomo.Nw];
ioff[0] = 0;
for (int i=1;i<tomo.Nw;i++) ioff[i] = ioff[i-1] + tomo.Nsubap[i-1];
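// ioff is the exclusive prefix sum of Nsubap: ioff[n] is the offset of the first
// sub-aperture of WFS n in the concatenated X/Y coordinate arrays.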
e = hipMemcpyAsync(tomo_gpu->ioff_d, ioff, tomo.Nw*sizeof(long), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu ioff_d");
e = hipMemcpyAsync(tomo_gpu->alphaX_d, tomo.alphaX, tomo.Nw*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu alphaX_d");
e = hipMemcpyAsync(tomo_gpu->alphaY_d, tomo.alphaY, tomo.Nw*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu alphaY_d");
e = hipMemcpyAsync(tomo_gpu->GsAlt_d, tomo.GsAlt, tomo.Nw*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu GsAlt_d");
e = hipMemcpyAsync(tomo_gpu->Nssp_d, tomo.Nssp, tomo.Nw*sizeof(long), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu Nssp_d");
e = hipMemcpyAsync(tomo_gpu->diamPup_d, tomo.diamPup, tomo.Nw*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu diamPup_d");
e = hipMemcpyAsync(tomo_gpu->XPup_d, tomo.XPup, tomo.Nw*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu XPup_d");
e = hipMemcpyAsync(tomo_gpu->YPup_d, tomo.YPup, tomo.Nw*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu YPup_d");
e = hipMemcpyAsync(tomo_gpu->thetaML_d, tomo.thetaML, tomo.Nw*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu thetaML_d");
e = hipMemcpyAsync(tomo_gpu->X_d, tomo.X, tomo.Nx*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu X_d");
e = hipMemcpyAsync(tomo_gpu->Y_d, tomo.Y, tomo.Nx*sizeof(double), hipMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu Y_d");
hipStreamSynchronize(tomo_gpu->matcov_stream);
}
//extern "C"
void matcov_gpu3(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda, struct tomo_struct tomo,
struct tomo_gpu_struct *tomo_gpu)
{
/* *** matcov gpu kernel driver ***
* Arguments
* ==========
* data double pointer: A pointer to the matrix/submatrix to be generated. It
* should always point to the first element in a matrix/submatrix
*
* nrows integer: The number of rows of the matrix/submatrix to be generated
*
* ncols integer: The number of columns of the matrix/submatrix to be generated
*
* xoffset integer: The x-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the x-coordinate of the first element in the matrix/submatrix
*
* yoffset integer: The y-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the y-coordinate of the first element in the matrix/submatrix
*
* lda integer: The leading dimension of the matrix/submatrix
*/
hipError_t e;
// %%%%%%% Pre-computation of DPHI %%%%%%%%%%
//Computes an array of DPHI (tabDPHI) for an array of subaperture distance rr for each DIFFERENT L0
const long Nw = tomo.Nw;
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
int type_mat = tomo.part;
int size = tomo.Nslopes - 2 * tomo.Nsubap[tomo.Nw-1];
int *tab_wfs;
tab_wfs = (int*)malloc(size*sizeof(int));
int *tab_subap;
tab_subap = (int*)malloc(size*sizeof(int));
int *tab_xy;
tab_xy = (int*)malloc(size*sizeof(int));
long ts = Nw - 1;//Truth sensor : ts
int cpt = 0;
for (int cc=0;cc<Nw;cc++) {
if (cc != ts) {
int nslps = tomo.Nsubap[cc]*2;
for (int ccc=0;ccc<nslps;ccc++) {
if (cc > ts) tab_wfs[ccc+cpt] = cc - 1;
else tab_wfs[ccc+cpt] = cc;
if (ccc < nslps/2) {
tab_subap[ccc+cpt] = ccc;
tab_xy[ccc+cpt] = 0;
} else {
tab_subap[ccc+cpt] = ccc - nslps/2;
tab_xy[ccc+cpt] = 1;
}
}
cpt += nslps;
}
}
int *tab_wfs_d;
//printf("sizeof tab_wfs is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = hipMalloc((void**)&tab_wfs_d, size*sizeof(int));
process_error(e, "alloc gpu tab_wfs_d");
e = hipMemcpy(tab_wfs_d, tab_wfs, size*sizeof(int), hipMemcpyHostToDevice);
process_error(e, "copy gpu tab_wfs_d");
int *tab_subap_d;
//printf("sizeof tab_subap is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = hipMalloc((void**)&tab_subap_d, size*sizeof(int));
process_error(e, "alloc gpu tab_subap_d");
e = hipMemcpy(tab_subap_d, tab_subap, size*sizeof(int), hipMemcpyHostToDevice);
process_error(e, "copy gpu tab_subap_d");
int *tab_xy_d;
//printf("sizeof tab_xy is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = hipMalloc((void**)&tab_xy_d, size*sizeof(int));
process_error(e, "alloc gpu tab_xy_d");
e = hipMemcpy(tab_xy_d, tab_xy, size*sizeof(int), hipMemcpyHostToDevice);
process_error(e, "copy gpu tab_xy_d");
int nbx = nrows / matcov_thread_x + (nrows%matcov_thread_x != 0);
int nby = ncols / matcov_thread_y + (ncols%matcov_thread_y != 0);
dim3 dimBlock(matcov_thread_x, matcov_thread_y);
dim3 dimGrid(nbx, nby);
const long Nsubap = tomo.Nsubap[0];
hipLaunchKernelGGL(( matcov_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, tomo_gpu->matcov_stream, data, nrows, ncols, xoffset, yoffset, lda, tab_wfs_d, tab_subap_d, tab_xy_d,
convert,tomo_gpu->sspSizeL_d,tomo_gpu->Nssp_d,tomo_gpu->u_d,tomo_gpu->v_d,
pasDPHI,tomo_gpu->tabDPHI_d,tomo_gpu->indexL0_d,tomo_gpu->cn2_d,
Ndphi,tomo.Nw,tomo.Nlayer,Nsubap,type_mat,tomo.DiamTel);
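// The generation kernel above fills the gx <= gy half of the symmetric matrix on
// matcov_stream; when the full matrix is requested (type_mat == 1) the copy kernel
// below mirrors it into the gx > gy half. The copy runs on the legacy default
// stream, which (assuming matcov_stream is a blocking stream, as created by
// hipStreamCreate) does not start until the generation kernel has completed.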
if (type_mat == 1)
hipLaunchKernelGGL(( matcov_gpu_kernel_copy), dim3(dimGrid), dim3(dimBlock), 0, 0, data, nrows, ncols, xoffset, yoffset, lda);
hipStreamSynchronize(tomo_gpu->matcov_stream);
if (tab_wfs) free(tab_wfs);
if (tab_subap) free(tab_subap);
if (tab_xy) free(tab_xy);
if (tab_wfs_d) hipFree(tab_wfs_d);
if (tab_subap_d) hipFree(tab_subap_d);
if (tab_xy_d) hipFree(tab_xy_d);
}
void matts_gpu(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda, struct tomo_struct tomo,
struct tomo_gpu_struct *tomo_gpu)
{
/* *** matcov gpu kernel driver ***
* Arguments
* ==========
* data double pointer: A pointer to the matrix/submatrix to be generated. It
* should always point to the first element in a matrix/submatrix
*
* nrows integer: The number of rows of the matrix/submatrix to be generated
*
* ncols integer: The number of columns of the matrix/submatrix to be generated
*
* xoffset integer: The x-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the x-coordinate of the first element in the matrix/submatrix
*
* yoffset integer: The y-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the y-coordinate of the first element in the matrix/submatrix
*
* lda integer: The leading dimension of the matrix/submatrix
*/
const long Nw = tomo.Nw;
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
int nbx = nrows / matcov_thread_x + (nrows%matcov_thread_x != 0);
int nby = ncols / matcov_thread_y + (ncols%matcov_thread_y != 0);
dim3 dimBlock(matcov_thread_x, matcov_thread_y);
dim3 dimGrid(nbx, nby);
const long Nsubap = tomo.Nsubap[Nw-1];
hipLaunchKernelGGL(( matts_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, data, nrows, ncols, xoffset, yoffset, lda,
convert,tomo_gpu->X_d,tomo_gpu->Y_d,tomo_gpu->Nssp_d,
pasDPHI,tomo_gpu->tabDPHI_d,tomo_gpu->indexL0_d,tomo_gpu->cn2_d,
Ndphi,tomo.Nw,tomo.Nlayer,Nsubap,tomo.DiamTel);
CudaCheckError();
}
__device__ double compute_element_noise(int ipos, int jpos, double convert, double *sspSizeL, long *Nssp, double *u, double *v,
double pasDPHI, double *tabDPHI, long *indexL0, double *cn2, int Ndphi, int Nw, int Nlayer,
int Nsubap, double *alphaX, double *alphaY, double lgs_cst, double noise_var, double spotWidth,
double dH_lgs, double alt_lgs, int type_mat, int nlgs, double teldiam)
{
/* *** Covariance matrix per-element generation ***
* Arguments
* =========
* ipos: Integer: global x-coordinate of the element w.r.t. the entire matrix
* jpos: Integer: global y-coordinate of the element w.r.t. the entire matrix
*/
// accumulate the covariance over all turbulent layers, then add the measurement-noise terms
const double lambda2 = 0.00026942094446267851;
//WFS m
int m = ipos / (2 * Nsubap);
if (type_mat == 3) m = Nw-1;
//WFS n
int n = jpos / (2 * Nsubap);
if (type_mat == 2) n = Nw-1;
//subap i
int i = ipos % (2 * Nsubap);
//subap j
int j = jpos % (2 * Nsubap);
//xy i
int xy_i;
//xy j
int xy_j;
if (i>=Nsubap) {
i-= Nsubap;
xy_i = 1;
} else xy_i = 0;
if (j>=Nsubap) {
j-= Nsubap;
xy_j = 1;
} else xy_j = 0;
const double sspSizem = teldiam / Nssp[m];
const double sspSizen = teldiam / Nssp[n];
const double kk = lambda2 / (sspSizem * sspSizen);
int type = xy_i * 2 + xy_j;
//Layer l
double covar = 0.0;
#pragma unroll
for (int l = 0; l < Nlayer; l++)
{
const double sspSizeml = sspSizeL[m * Nlayer + l];
const double sspSizenl = sspSizeL[n * Nlayer + l];
//test whether the layer altitude is not higher than the LGS altitude
if ((sspSizeml > 0) && (sspSizenl > 0))
{
const int pos1 = m + i * Nw + l * Nw * Nsubap;
const int pos2 = n + j * Nw + l * Nw * Nsubap;
const double du = u[pos1] - u[pos2];
const double dv = v[pos1] - v[pos2];
const double s1 = sspSizeml * 0.5;
const double s2 = sspSizenl * 0.5;
const double ac = s1 - s2;
const double ad = s1 + s2;
const double bc = -ad; // initially -s1-s2;
const double bd = -ac; // initially -s1+s2;
if (type == 0) covar += 0.5 * pasDPHI * cov_XX(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l];
else if (type == 3) covar += 0.5 * pasDPHI * cov_YY(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l];
else //if ((type == 1) || (type == 2))
{
const double s0 = sqrt(s1 * s1 + s2 * s2); //half size of the subaperture equivalent to a convolution by s1 and s2
const double dd = (s1 > s2) ? 1. - s2 / s1 : 1. - s1 / s2; // Nono's style ....
covar += 0.25 * pasDPHI * cov_XY(du,dv,s0,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l] * (1. - dd * dd);
}
}
}
// adding noise
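// Noise terms only appear on the same-WFS block (m == n): for LGS sensors
// (m < nlgs) an elongated-spot model scales the variance of each sub-aperture
// (i == j) along the spot's long axis, and the constant lgs_cst is added to every
// x/x and y/y entry of that block; NGS sensors just get the isotropic noise_var
// on their diagonal x/x and y/y entries.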
if (m == n) {
if (m < nlgs) {
if (i == j) {
// lgs case
const int pos1 = m + i * Nw;
double x = u[pos1];
double y = v[pos1];
const double xwfs = alphaX[m] * 206265;
const double ywfs = alphaY[m] * 206265;
double lltx = 0;
double llty = 0;
const double lltnorm = sqrtf(xwfs*xwfs + ywfs*ywfs);
if (lltnorm != 0) {
lltx = xwfs / lltnorm * teldiam / 2.0;
llty = ywfs / lltnorm * teldiam / 2.0;
}
x -= lltx;
y -= llty;
x = 206265. * dH_lgs * x / alt_lgs / alt_lgs; // extension at Fwhm, in arcsec
y = 206265. * dH_lgs * y / alt_lgs / alt_lgs; // extension at Fwhm, in arcsec
const double lgsExt = sqrtf(x * x + y * y); // length of the extension
const double lgsTheta = x != 0 ? atanf( y / x) : 0.0; // angle of extension
const double totalExt = sqrtf( lgsExt * lgsExt + spotWidth * spotWidth);
// length of the extension including seeing, laser size, ...
const double ratio = totalExt / spotWidth;
const double noiseLongAxis = noise_var * ratio * ratio;
if (type == 0) covar += noiseLongAxis * cosf(lgsTheta) * cosf(lgsTheta) +
noise_var * sinf(lgsTheta) * sinf(lgsTheta);
else if (type == 3) covar += noiseLongAxis * sinf(lgsTheta) * sinf(lgsTheta) +
noise_var * cosf(lgsTheta) * cosf(lgsTheta);
else covar += (noiseLongAxis-noise_var) * sinf(lgsTheta) * cosf(lgsTheta);
}
if ((type == 0) || (type == 3))
covar += lgs_cst;
} else {
// ngs case
if (i==j) {
if ((type == 0) || (type == 3)) {
covar += noise_var;
}
}
}
}
return (double)covar;
}
__global__ void matcovnoise_gpu_kernel(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda,
double convert, double *sspSizeL, long *Nssp, double *u, double *v,
double pasDPHI, double *tabDPHI, long *indexL0, double *cn2, int Ndphi, int Nw, int Nlayer,
int Nsubap, double *alphaX, double *alphaY, double lgs_cst, double noise_var, double spotWidth,
double dH_lgs, double alt_lgs, int type_mat, int nlgs, double teldiam)
{
/* *** covariance matrix generation kernel ***
* The kernel generates the element values in a given matrix/submatrix
* The generation function can be any function, as long as each element
* can be computed both individually and independently
*
* see argument description in the kernel driver
*/
// local thread coordinates w.r.t. thread block
const int tx_ = threadIdx.x;
const int ty_ = threadIdx.y;
// local thread block coordinates w.r.t. kernel grid
const int bx_ = blockIdx.x;
const int by_ = blockIdx.y;
// local coordinates of the element w.r.t. submatrix
int lx = bx_ * blockDim.x + tx_;
int ly = by_ * blockDim.y + ty_;
// global coordinates of the element w.r.t. the entire matrix
int gx = lx + xoffset;
int gy = ly + yoffset;
// out-of-bound threads should terminate
if( (lx >= nrows) || (ly >= ncols) ) return;
// Advance the data pointer accordingly
data += ly * lda + lx;
if ((type_mat == 3) || (gx <= gy)) {
// call the generation function
data[0] = compute_element_noise(gx, gy, convert, sspSizeL, Nssp, u, v, pasDPHI, tabDPHI, indexL0, cn2, Ndphi, Nw, Nlayer,
Nsubap, alphaX, alphaY, lgs_cst, noise_var, spotWidth, dH_lgs, alt_lgs, type_mat, nlgs, teldiam);
//printf("gx = %d, gy = %d ----- %.2f \n", gx, gy, data[0]);
}
}
void matcov_gpu4(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda, struct tomo_struct tomo,
struct tomo_gpu_struct *tomo_gpu)
{
/* *** matcov gpu kernel driver ***
* Arguments
* ==========
* data double pointer: A pointer to the matrix/submatrix to be generated. It
* should always point to the first element in a matrix/submatrix
*
* nrows integer: The number of rows of the matrix/submatrix to be generated
*
* ncols integer: The number of columns of the matrix/submatrix to be generated
*
* xoffset integer: The x-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the x-coordinate of the first element in the matrix/submatrix
*
* yoffset integer: The y-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the y-coordinate of the first element in the matrix/submatrix
*
* lda integer: The leading dimension of the matrix/submatrix
*/
// %%%%%%% Pre-computation of DPHI %%%%%%%%%%
//Computes an array of DPHI (tabDPHI) for an array of subaperture distance rr for each DIFFERENT L0
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
int type_mat = tomo.part;
int nbx = nrows / matcov_thread_x + (nrows%matcov_thread_x != 0);
int nby = ncols / matcov_thread_y + (ncols%matcov_thread_y != 0);
dim3 dimBlock(matcov_thread_x, matcov_thread_y);
dim3 dimGrid(nbx, nby);
const long Nsubap = tomo.Nsubap[0];
hipLaunchKernelGGL(( matcovnoise_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, data, nrows, ncols, xoffset, yoffset, lda, convert, tomo_gpu->sspSizeL_d,
tomo_gpu->Nssp_d, tomo_gpu->u_d, tomo_gpu->v_d, pasDPHI, tomo_gpu->tabDPHI_d,
tomo_gpu->indexL0_d, tomo_gpu->cn2_d, Ndphi, tomo.Nw, tomo.Nlayer,
Nsubap, tomo_gpu->alphaX_d, tomo_gpu->alphaY_d, tomo.lgs_cst, tomo.noise_var,
tomo.spot_width, tomo.lgs_depth, tomo.lgs_alt, type_mat, tomo.nlgs, tomo.DiamTel);
if (type_mat == 1)
hipLaunchKernelGGL(( matcov_gpu_kernel_copy), dim3(dimGrid), dim3(dimBlock), 0, 0, data, nrows, ncols, xoffset, yoffset, lda);
}
|
a72efcd1988788649e142316c8db3f7896d33a79.cu
|
#include<cuda_runtime.h>
#include<cuda_runtime_api.h>
#include<cublas.h>
#include<stdio.h>
#include "matcov.h"
#include "matcov_gpu.h"
/* Tuning parameters of tbulateDPHI kernel*/
#define tabDPHI_thread_x (256)
/* Tuning parameters of matcov GPU Kernel */
// Thread block size (x, y),
// max #threads per block is 1024 on Fermi (CC 2.x) and Kepler; 512 on older CC 1.x devices
#define matcov_thread_x (8)
#define matcov_thread_y (8)
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
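// Typical usage of the error-checking helpers above (illustrative only;
// my_kernel/ptr/bytes are placeholder names):
//   CudaSafeCall( cudaMalloc((void**)&ptr, bytes) );
//   my_kernel<<<grid, block>>>(ptr);
//   CudaCheckError();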
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
//============================================================================================
//================================= AUX FUNCTIONS ============================================
//============================================================================================
void process_error(cudaError_t e, const char* str)
{
if(e != cudaSuccess)
{
printf("*** Error %s: %s \n", str, cudaGetErrorString(e));
exit(1);
}
}
//-----------------------------------------------------------------------
double* arr2dAlloc_gpu(long nbLin, long nbCol)
/* DOCUMENT array = arr2dAlloc(nblin,nbcol)
Allocates a 2d array (double).
*/
{
cudaError_t e;
double* tableau;
e = cudaMalloc((void**)&tableau, sizeof(double) * nbCol * nbLin);
process_error(e, "gpu alloc tableau2");
return tableau;
}
void arr2dFree_gpu(double *tableau)
/* DOCUMENT arr2dFree(array)
Free a 2d array (double).
*/
{
if(tableau)cudaFree(tableau);
}
//============================================================================================
//============================= tabDPHI KERNEL(s) ============================================
//============================================================================================
__device__ double macdo_x56_gpu(double x, int k)
/* DOCUMENT macdo_x56_gpu(x)
Computation of the function
f(x) = x^(5/6)*K_{5/6}(x)
using a series for the estimation of K_{5/6}, taken from Rod Conan's thesis:
K_a(x)=1/2 \sum_{n=0}^\infty \frac{(-1)^n}{n!}
\left(\Gamma(-n-a) (x/2)^{2n+a} + \Gamma(-n+a) (x/2)^{2n-a} \right) ,
with a = 5/6.
Setting x22 = (x/2)^2, setting uda = (1/2)^a, and multiplying by x^a,
this becomes :
x^a * Ka(x) = 0.5 * sum_{n>=0} (-1)^n / n! * [ G(-n-a)*uda*x22^(n+a) + G(-n+a)/uda*x22^n ]
Then we use the following recurrence formulae on the following quantities :
G(-(n+1)-a) = G(-n-a) / -a-n-1
G(-(n+1)+a) = G(-n+a) / a-n-1
(n+1)! = n! * (n+1)
x22^(n+1) = x22^n * x22
and at each iteration on n, one will use the values already computed at step (n-1).
The values of G(a) and G(-a) are hardcoded instead of being computed.
The first term of the series has also been skipped, as it
vanishes with another term in the expression of Dphi.
SEE ALSO:
*/
{
const double a = 5. / 6.;
const double x2a = pow(x, 2. * a), x22 = x * x / 4.;
double x2n; // x^2.a, etc
double s = 0.0;
int n;
const double Ga[11] = { 0, 12.067619015983075, 5.17183672113560444,
0.795667187867016068, 0.0628158306210802181, 0.00301515986981185091,
9.72632216068338833e-05, 2.25320204494595251e-06, 3.93000356676612095e-08,
5.34694362825451923e-10, 5.83302941264329804e-12 };
const double Gma[11] = { -3.74878707653729304, -2.04479295083852408,
-0.360845814853857083, -0.0313778969438136685, -0.001622994669507603,
-5.56455315259749673e-05, -1.35720808599938951e-06,
-2.47515152461894642e-08, -3.50257291219662472e-10,
-3.95770950530691961e-12, -3.65327031259100284e-14 };
x2n = 0.5; // init (1/2) * x^0
s = Gma[0] * x2a;
s *= x2n;
// prepare recurrence iteration for next step
x2n *= x22; // x^n
#pragma unroll
for (n = 1; n <= 10; n++)
{
s += (Gma[n] * x2a + Ga[n]) * x2n;
// prepare recurrence iteration for next step
x2n *= x22; // x^n
}
return s;
}
//------------------------------------------------------------------------------------
__device__ double asymp_macdo_gpu(double x)
/* DOCUMENT asymp_macdo_gpu(x)
Computes a term involved in the computation of the phase structure
function with a finite outer scale according to the Von-Karman
model. The term involves the MacDonald function (modified bessel
function of second kind) K_{5/6}(x), and the algorithm uses the
asymptotic form for x ~ infinity.
Warnings :
- This function triggers a floating-point exception for x=0
and should not be used in this case.
- Works only for x>0.
SEE ALSO:
*/
{
// k2 is the value for
// gamma_R(5./6)*2^(-1./6)
const double k2 = 1.00563491799858928388289314170833;
const double k3 = 1.25331413731550012081; // sqrt(pi/2)
const double a1 = 0.22222222222222222222; // 2/9
const double a2 = -0.08641975308641974829; // -7/81
const double a3 = 0.08001828989483310284; // 175/2187
double res;
double x_1;
x_1 = 1. / x;
res = k2
- k3 * exp(-x) * pow(x, 1 / 3.)
* (1.0 + x_1 * (a1 + x_1 * (a2 + x_1 * a3)));
return res;
}
//------------------------------------------------------------------------------------
__device__ double rodconan_gpu(double r, double L0, int k)
/* DOCUMENT rodconan_gpu(r,L0,k=)
The phase structure function is computed from the expression
Dphi(r) = k1 * L0^(5./3) * (k2 - (2.pi.r/L0)^5/6 K_{5/6}(2.pi.r/L0))
For small r, the expression is computed from a development of
K_5/6 near 0. The value of k2 is not used, as this same value
appears in the series and cancels with k2.
For large r, the expression is taken from an asymptotic form.
SEE ALSO:
*/
{
const double pi = 3.1415926535897932384626433;
double res = 0;
// k1 is the value of :
// 2*gamma_R(11./6)*2^(-5./6)*pi^(-8./3)*(24*gamma_R(6./5)/5.)^(5./6);
const double k1 = 0.1716613621245709486;
const double dprf0 = (2 * pi / L0) * r;
// k2 is the value for gamma_R(5./6)*2^(-1./6),
// but is now unused
// k2 = 1.0056349179985892838;
// Xlim = 0.75*2*pi; // = 4.71239
if (dprf0 > 4.71239)
res = asymp_macdo_gpu(dprf0);
else
res = -macdo_x56_gpu(dprf0, k);
res *= k1 * pow(L0, 5. / 3);
return res;
}
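// tabDPHI_d is an Nl0 x Ndphi table stored contiguously by L0 index:
// entry [l * Ndphi + j] = Dphi(j / convert, L0diff_d[l]), i.e. the structure
// function sampled at separation j/convert for the l-th distinct outer scale.
// The kernel below uses one thread per table entry.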
__global__ void tabulateDPHI_gpu_kernel(double* tabDPHI_d, double* L0diff_d, long Nl0, long Ndphi, double convert)
{
const int tx = threadIdx.x;
const int ty = blockIdx.x;
const int tid = ty * blockDim.x + tx;
int l = tid / Ndphi;
int j = tid % Ndphi;
if(tid >= (Nl0*Ndphi) ) return;
tabDPHI_d[tid] = rodconan_gpu((double)j / convert, L0diff_d[l], 10);
//double* mytabDPHI = tabDPHI_d + (l * Ndphi);
//
//int j, k;
//#pragma unroll
//for(k = 0; k < (Ndphi/tabDPHI_thread_x); k++)
//{
// j = k * tabDPHI_thread_x + tx;
// mytabDPHI[j] = rodconan_gpu(rr_d[j], L0diff_d[l], 10);
//}
//
//k = (Ndphi/tabDPHI_thread_x);
//if(tx < (Ndphi%tabDPHI_thread_x) )
//{
// j = k * tabDPHI_thread_x + tx;
// mytabDPHI[j] = rodconan_gpu(rr_d[j], L0diff_d[l], 10);
//}
}
//------------------------------------------------------------------------------------
double* tabulateDPHI_gpu(struct tomo_struct tomo, long Ndphi, long *indexL0, int* Nl0_, double convert)
//void tabulateDPHI_gpu(double* tabDPHI_d, double* rr_d,struct tomo_struct tomo, long Ndphi, long *indexL0_h)
/* DOCUMENT tabDPHI = tabulateDPHI(rr,tomo,Ndphi, indexL0)
<tomo> : structure with all the needed information
<Ndphi> : size of rr
<indexL0> : link between the index of the studied layer and the index of the precomputed one.
Computes the phase structure function for a separation rr(x,y).
The r0 is not taken into account : the final result of DPHI(x,y,L0)
has to be scaled with r0^-5/3, with r0 expressed in meters, to get
the right value.
Computes the phase structure function for each different L0 and gives an array (indexL0) linking the index of layer i to the index in tabDPHI: for layer l, DPHI = DPHI(du, dv, indexL0[l], rr, tabDPHI, convert).
SEE ALSO: DPHI
*/
{
//Search the different L0 and build indexL0
const long Nlayer = tomo.Nlayer;
long i, j;
int cpt = 1;
double tmp[Nlayer];
cudaError_t e;
tmp[0] = tomo.L0[0];
indexL0[0] = 0;
for (i = 1; i < Nlayer; i++)
{
j = 0;
const double l0 = tomo.L0[i];
while ((j < cpt) && (tmp[j] != l0)) {j++;}
indexL0[i] = j;
if (j == cpt)
{
tmp[j] = l0;
cpt++;
}
}
const int Nl0 = cpt;
double L0diff[Nl0];
double* L0diff_d;
// allocate space for L0
e = cudaMalloc((void**)&L0diff_d, Nl0*sizeof(double));
process_error(e, "alloc gpu L0diff_d");
for (i = 0; i < Nl0; i++)
{
L0diff[i] = tmp[i];
}
// offload L0diff
e = cudaMemcpy(L0diff_d, L0diff, Nl0*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "offload L0diff");
// precomputation of DPHI: only once for each distinct L0
double* tabDPHI_d = arr2dAlloc_gpu(Nl0, Ndphi);
// gpu kernel goes here
//for (l = 0; l < Nl0; l++)
//{
// #ifdef USE_OPENMP
// #pragma omp parallel num_threads(tomo.ncpu)
// #pragma omp for nowait
// #endif
// for (j = 0; j < Ndphi; j++)
// {
// tabDPHI[l][j] = rodconan_gpu(rr[j], L0diff[l], 10);
// }
//}
// Assume one thread per element
int nblocks = (Ndphi*Nl0)/tabDPHI_thread_x + ( ((Ndphi*Nl0)%tabDPHI_thread_x) != 0);
dim3 dimBlock(tabDPHI_thread_x, 1);
dim3 dimGrid(nblocks, 1);
tabulateDPHI_gpu_kernel<<<dimGrid, dimBlock>>>(tabDPHI_d, L0diff_d, Nl0, Ndphi, convert);
if(L0diff_d)cudaFree(L0diff_d);
*Nl0_ = Nl0;
return tabDPHI_d;
}
//------------------------------------------------------------------------------------
__device__ double DPHI_gpu(double x, double y, long indexL0, double *tabDPHI, double convert, int Ndphi)
/* DOCUMENT dphi = DPHI(x,y,indexL0,rr,tabDPHI,convert) * r0^(-5./3)
<x> & <y> : separation between apertures
<indexL0> : index for the L0 taken into account
<rr> : array of distance between apertures
<tabDPHI> : array of precomputed DPHI
<convert> : relation between the index on tabDPHI and (x,y)
Computes the phase structure function for a separation (x,y).
The r0 is not taken into account : the final result of DPHI(x,y,L0)
has to be scaled with r0^-5/3, with r0 expressed in meters, to get
the right value.
SEE ALSO:
*/
{
double r = sqrt(x * x + y * y);
long i0 = (long) (r * convert);
long i1 = i0 + 1;
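// weighted blend of the two nearest tabulated samples i0 and i1; the weights are
// the distances of r to each sample, and the 1/step normalisation appears to be
// folded into the pasDPHI prefactor applied by the cov_* callers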
return ((r - (double)i0 / convert) * tabDPHI[indexL0 * Ndphi + i1]
+ ((double)i1 / convert - r) * tabDPHI[indexL0 * Ndphi + i0]);
}
//------------------------------------------------------------------------------------
__device__ double cov_XX(double du, double dv, double ac, double ad, double bc, double bd, double *tabDPHI, long indexL0, double convert, int Ndphi)
/* DOCUMENT
Compute the XX-covariance with the distance sqrt(du2+dv2). DPHI is precomputed on tabDPHI.
*/
{
return -DPHI_gpu(du + ac, dv, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du + ad, dv, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du + bc, dv, indexL0, tabDPHI, convert, Ndphi)
- DPHI_gpu(du + bd, dv, indexL0, tabDPHI, convert, Ndphi);
}
//------------------------------------------------------------------------------------
__device__ double cov_YY(double du, double dv, double ac, double ad, double bc, double bd, double *tabDPHI, long indexL0, double convert, int Ndphi)
/* DOCUMENT
Compute the YY-covariance with the distance sqrt(du2+dv2). DPHI is precomputed on tabDPHI.
*/
{
return -DPHI_gpu(du, dv + ac, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du, dv + ad, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du, dv + bc, indexL0, tabDPHI, convert, Ndphi)
- DPHI_gpu(du, dv + bd, indexL0, tabDPHI, convert, Ndphi);
}
//------------------------------------------------------------------------------------
__device__ double cov_XY(double du, double dv, double s0, double *tabDPHI, long indexL0, double convert, int Ndphi)
/* DOCUMENT
Compute the XY-covariance with the distance sqrt(du2+dv2). DPHI is precomputed on tabDPHI.
*/
{
return -DPHI_gpu(du + s0, dv - s0, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du + s0, dv + s0, indexL0, tabDPHI, convert, Ndphi)
+ DPHI_gpu(du - s0, dv - s0, indexL0, tabDPHI, convert, Ndphi)
- DPHI_gpu(du - s0, dv + s0, indexL0, tabDPHI, convert, Ndphi);
}
//------------------------------------------------------------------------------------
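// One thread per (layer l, sub-aperture i, WFS n) triple. The projected
// coordinates are stored flat as u[n + i*Nw + l*Nw*Nsubap] (the same indexing is
// used on the read side in compute_element / compute_element_noise).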
__global__ void subposition_gpu_kernel(long Nw, long Nsubap, long Nlayer, double *alphaX, double *alphaY,
double *h, double *GsAlt, long *Nssp, double *diamPup, double *thetaML,
long *ioff, double *X, double *Y, double *XPup, double *YPup,
double *u, double *v)
{
const int tx = threadIdx.x;
const int ty = blockIdx.x;
const int tid = ty * blockDim.x + tx;
long i;
long n;
long l;
const double rad = 3.14159265358979323846 / 180.;
if(tid >= (Nw * Nsubap * Nlayer) ) return;
l = tid / (Nw * Nsubap);
const int pos = tid - l * (Nsubap * Nw);
i = pos / Nw;
n = pos - i * Nw;
//tid = n + i * Nw + l * Nw * Nsubap
const double dX = alphaX[n] * h[l];
const double dY = alphaY[n] * h[l];
const double rr = 1. - h[l] * GsAlt[n];
const long nssp = Nssp[n];
//magnification factor
const double G = diamPup[n] / (double) (nssp);
//rotation angle
const double th = thetaML[n] * rad;
//taking magnification factor into account
const double xtp = X[ioff[n] + i] * G;
const double ytp = Y[ioff[n] + i] * G;
//taking rotation into account
double uu = xtp * cos(th) - ytp * sin(th);
double vv = xtp * sin(th) + ytp * cos(th);
//taking pupil offset into account
uu += XPup[n];
vv += YPup[n];
//Projection onto the layer
u[tid] = uu * rr + dX;
v[tid] = vv * rr + dY;
}
//------------------------------------------------------------------------------------
//extern "C"
void subap_position_gpu(struct tomo_struct tomo, double *u_d, double *v_d)
//void subap_position_gpu(struct tomo_struct tomo, double ***u, double ***v)
/* DOCUMENT DOCUMENT subap_position(tomo, u, v)
<tomo> : structure with all the needed information.
<u> and <v> : 3d arrays containing the sub-apertures projected coordinates onto all the layers. u[0][2][1] is the X-coordinate of the subap 2 of the WFS 0 on the layer 1.
Computes the coordinates of all sub-apertures projected onto all the layers
*/
{
cudaError_t e;
long ioff[tomo.Nw];
ioff[0] = 0;
for (int i=1;i<tomo.Nw;i++) ioff[i] = ioff[i-1] + tomo.Nsubap[i-1];
long* ioff_d;
e = cudaMalloc((void**)&ioff_d, tomo.Nw*sizeof(long));
process_error(e, "alloc gpu ioff_d");
e = cudaMemcpy(ioff_d, ioff, tomo.Nw*sizeof(long), cudaMemcpyHostToDevice);
process_error(e, "copy gpu ioff_d");
double *alphaX_d;
e = cudaMalloc((void**)&alphaX_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu alphaX_d");
e = cudaMemcpy(alphaX_d, tomo.alphaX, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu alphaX_d");
double *alphaY_d;
e = cudaMalloc((void**)&alphaY_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu alphaY_d");
e = cudaMemcpy(alphaY_d, tomo.alphaY, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu alphaY_d");
double *h_d;
e = cudaMalloc((void**)&h_d, tomo.Nlayer*sizeof(double));
process_error(e, "alloc gpu h_d");
e = cudaMemcpy(h_d, tomo.h, tomo.Nlayer*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu h_d");
double *GsAlt_d;
e = cudaMalloc((void**)&GsAlt_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu GsAlt_d");
e = cudaMemcpy(GsAlt_d, tomo.GsAlt, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu GsAlt_d");
long *Nssp_d;
e = cudaMalloc((void**)&Nssp_d, tomo.Nw*sizeof(long));
process_error(e, "alloc gpu Nssp_d");
e = cudaMemcpy(Nssp_d, tomo.Nssp, tomo.Nw*sizeof(long), cudaMemcpyHostToDevice);
process_error(e, "copy gpu Nssp_d");
double *diamPup_d;
e = cudaMalloc((void**)&diamPup_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu diamPup_d");
e = cudaMemcpy(diamPup_d, tomo.diamPup, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu diamPup_d");
double *thetaML_d;
e = cudaMalloc((void**)&thetaML_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu thetaML_d");
e = cudaMemcpy(thetaML_d, tomo.thetaML, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu thetaML_d");
double *X_d;
e = cudaMalloc((void**)&X_d, tomo.Nx*sizeof(double));
process_error(e, "alloc gpu X_d");
e = cudaMemcpy(X_d, tomo.X, tomo.Nx*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu X_d");
double *Y_d;
e = cudaMalloc((void**)&Y_d, tomo.Nx*sizeof(double));
process_error(e, "alloc gpu Y_d");
e = cudaMemcpy(Y_d, tomo.Y, tomo.Nx*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu Y_d");
double *XPup_d;
e = cudaMalloc((void**)&XPup_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu XPup_d");
e = cudaMemcpy(XPup_d, tomo.XPup, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu XPup_d");
double *YPup_d;
e = cudaMalloc((void**)&YPup_d, tomo.Nw*sizeof(double));
process_error(e, "alloc gpu YPup_d");
e = cudaMemcpy(YPup_d, tomo.YPup, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu YPup_d");
int msize = tomo.Nlayer * tomo.Nw * tomo.Nsubap[0];
int nblocks = msize / tabDPHI_thread_x + ( ( msize % tabDPHI_thread_x) != 0);
dim3 dimBlock(tabDPHI_thread_x, 1);
dim3 dimGrid(nblocks, 1);
subposition_gpu_kernel<<<dimGrid, dimBlock>>>(tomo.Nw, tomo.Nsubap[0], tomo.Nlayer, alphaX_d, alphaY_d,
h_d, GsAlt_d, Nssp_d, diamPup_d, thetaML_d, ioff_d, X_d,
Y_d, XPup_d, YPup_d, u_d, v_d);
if (ioff_d) cudaFree(ioff_d);
if (alphaX_d) cudaFree(alphaX_d);
if (alphaY_d) cudaFree(alphaY_d);
if (h_d) cudaFree(h_d);
if (GsAlt_d) cudaFree(GsAlt_d);
if (Nssp_d) cudaFree(Nssp_d);
if (diamPup_d) cudaFree(diamPup_d);
if (thetaML_d) cudaFree(thetaML_d);
if (X_d) cudaFree(X_d);
if (Y_d) cudaFree(Y_d);
if (XPup_d) cudaFree(XPup_d);
if (YPup_d) cudaFree(YPup_d);
}
//============================================================================================
//============================= GENERATION KERNEL ============================================
//============================================================================================
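// compute_element returns one entry (ipos, jpos) of the slope covariance matrix:
// a sum over turbulent layers of the XX/YY/XY covariances between the two
// sub-apertures, each weighted by the layer's cn2. The tab_wfs/tab_subap/tab_xy
// lookup tables built by the drivers translate a global row/column index into
// (WFS, sub-aperture, slope direction).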
__device__ double compute_element(int ipos, int jpos, int *tab_wfs, int* tab_subap, int* tab_xy, double convert,
double *sspSizeL, long *Nssp, double *u, double *v, double pasDPHI,double *tabDPHI,
long *indexL0, double *cn2, int Ndphi, int Nw, int Nlayer, int Nsubap,
int type_mat, double teldiam)
{
/* *** Covariance matrix per-element generation ***
* Arguments
* =========
* ipos: Integer: global x-coordinate of the element w.r.t. the entire matrix
* jpos: Integer: global y-coordinate of the element w.r.t. the entire matrix
*/
// accumulate the covariance over all turbulent layers
const double lambda2 = 0.00026942094446267851;
//WFS m
int m = tab_wfs[ipos];
if (type_mat == 3) m = Nw-1;
//WFS n
int n = tab_wfs[jpos];
if (type_mat == 2) n = Nw-1;
//subap i
int i = tab_subap[ipos];
//subap j
int j = tab_subap[jpos];
//xy i
int xy_i = tab_xy[ipos];
//xy j
int xy_j = tab_xy[jpos];
const double sspSizem = teldiam / Nssp[m];
const double sspSizen = teldiam / Nssp[n];
const double kk = lambda2 / (sspSizem * sspSizen);
int type = xy_i * 2 + xy_j;
//Layer l
double covar = 0.0;
#pragma unroll
for (int l = 0; l < Nlayer; l++)
{
const double sspSizeml = sspSizeL[m * Nlayer + l];
const double sspSizenl = sspSizeL[n * Nlayer + l];
//test whether the layer altitude is not higher than the LGS altitude
if ((sspSizeml > 0) && (sspSizenl > 0))
{
const int pos1 = m + i * Nw + l * Nw * Nsubap;
const int pos2 = n + j * Nw + l * Nw * Nsubap;
const double du = u[pos1] - u[pos2];
const double dv = v[pos1] - v[pos2];
const double s1 = sspSizeml * 0.5;
const double s2 = sspSizenl * 0.5;
const double ac = s1 - s2;
const double ad = s1 + s2;
const double bc = -ad; // initially -s1-s2;
const double bd = -ac; // initially -s1+s2;
if (type == 0) covar += 0.5 * pasDPHI * cov_XX(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l];
else if (type == 3) covar += 0.5 * pasDPHI * cov_YY(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l];
else //if ((type == 1) || (type == 2))
{
const double s0 = sqrt(s1 * s1 + s2 * s2); //half size of the subaperture equivalent to a convolution by s1 and s2
const double dd = (s1 > s2) ? 1. - s2 / s1 : 1. - s1 / s2; // Nono's style ....
covar += 0.25 * pasDPHI * cov_XY(du,dv,s0,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l] * (1. - dd * dd);
}
}
}
return (double)covar;
}
__device__ double compute_element_ts(int ipos, int jpos, double convert, double *X, double *Y,
long *Nssp, double pasDPHI, double *tabDPHI, long *indexL0, double *cn2,
int Ndphi, int Nw, int Nlayer, int Nsubap, double teldiam)
{
/* *** Covariance matrix per-element generation ***
* Arguments
* =========
* ipos: Integer: global x-coordinate of the element w.r.t. the entire matrix
* jpos: Integer: global y-coordinate of the element w.r.t. the entire matrix
*/
// accumulate the covariance over all turbulent layers for the truth sensor
const double lambda2 = 0.00026942094446267851;
//WFS Nw-1
//subap i
int i = ipos < Nsubap ? ipos : ipos - Nsubap;
//subap j
int j = jpos < Nsubap ? jpos : jpos - Nsubap;
//xy i
int xy_i = ipos < Nsubap ? 0 : 1;
//xy j
int xy_j = jpos < Nsubap ? 0 : 1;
const double sspSize = teldiam / Nssp[Nw-1];
const double kk = lambda2 / (sspSize * sspSize);
int type = xy_i * 2 + xy_j;
const double s = sspSize * 0.5;
const double ac = 0.0;
const double ad = 2.0 * s;
const double bc = -ad;
const double bd = 0.0;
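// Both sub-apertures belong to the truth sensor, so s1 = s2 = s and the
// cov_XX/cov_YY offsets of compute_element reduce to ac = 0, ad = 2s, bc = -2s, bd = 0.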
const double du = X[Nsubap*(Nw-1)+i] - X[Nsubap*(Nw-1)+j];
const double dv = Y[Nsubap*(Nw-1)+i] - Y[Nsubap*(Nw-1)+j];
//const double du = X[Nw-1 + i * Nw] - X[Nw-1 + j * Nw];
//const double dv = Y[Nw-1 + i * Nw] - Y[Nw-1 + j * Nw];
//Layer l
double covar = 0.0;
#pragma unroll
for (int l = 0; l < Nlayer; l++)
{
//test whether the layer altitude is not higher than the LGS altitude
if (sspSize > 0)
{
if (type == 0) covar += 0.5 * pasDPHI * cov_XX(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) *
kk * cn2[l];
else if (type == 3) covar += 0.5 * pasDPHI * cov_YY(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) *
kk * cn2[l];
else
{
const double s0 = 1.41421*s; //half size of the subaperture equivalent to a convolution by s1 and s2
const double dd = 0;
covar += 0.25 * pasDPHI * cov_XY(du,dv,s0,tabDPHI,indexL0[l],convert,Ndphi) *
kk * cn2[l] * (1. - dd * dd);
}
}
}
return (double)covar;
}
__global__ void matcov_gpu_kernel(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda,
int *tab_wfs, int* tab_subap, int* tab_xy, double convert, double *sspSizeL,
long *Nssp, double *u, double *v, double pasDPHI,double *tabDPHI, long *indexL0,
double *cn2, int Ndphi, int Nw, int Nlayer, int Nsubap, int type_mat, double teldiam)
{
/* *** covariance matrix generation kernel ***
* The kernel generates the element values in a given matrix/submatrix
* The generation function can be any function, as long as each element
* can be computed both individually and independently
*
* see argument description in the kernel driver
*/
// local thread coordinates w.r.t. thread block
const int tx_ = threadIdx.x;
const int ty_ = threadIdx.y;
// local thread block coordinates w.r.t. kernel grid
const int bx_ = blockIdx.x;
const int by_ = blockIdx.y;
// local coordinates of the element w.r.t. submatrix
int lx = bx_ * blockDim.x + tx_;
int ly = by_ * blockDim.y + ty_;
// global coordinates of the element w.r.t. the entire matrix
int gx = lx + xoffset;
int gy = ly + yoffset;
// out-of-bound threads should terminate
if( (lx >= nrows) || (ly >= ncols) ) return;
// Advance the data pointer accordingly
data += ly * lda + lx;
if ((type_mat == 3) || (gx <= gy)) {
// call the generation function
data[0] = compute_element(gx, gy, tab_wfs, tab_subap, tab_xy,convert,sspSizeL,Nssp,u,v,pasDPHI,tabDPHI,
indexL0,cn2,Ndphi,Nw,Nlayer,Nsubap,type_mat,teldiam);
//printf("gx = %d, gy = %d ----- %.2f \n", gx, gy, data[0]);
}
}
__global__ void matts_gpu_kernel(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda,
double convert, double *X, double *Y, long *Nssp, double pasDPHI,double *tabDPHI,
long *indexL0, double *cn2, int Ndphi, int Nw, int Nlayer, int Nsubap, double teldiam)
{
/* *** covariance matrix generation kernel ***
* The kernel generates the element values in a given matrix/submatrix
* The generation function can be any function, as long as each element
* can be computed both individually and independently
*
* see argument description in the kernel driver
*/
// local thread coordinates w.r.t. thread block
const int tx_ = threadIdx.x;
const int ty_ = threadIdx.y;
// local thread block coordinates w.r.t. kernel grid
const int bx_ = blockIdx.x;
const int by_ = blockIdx.y;
// local coordinates of the element w.r.t. submatrix
int lx = bx_ * blockDim.x + tx_;
int ly = by_ * blockDim.y + ty_;
// global coordinates of the element w.r.t. the entire matrix
int gx = lx + xoffset;
int gy = ly + yoffset;
// out-of-bound threads should terminate
if( (lx >= nrows) || (ly >= ncols) ) return;
// Advance the data pointer accordingly
data += ly * lda + lx;
// call the generation function
data[0] = compute_element_ts(gx, gy, convert,X, Y,Nssp,pasDPHI,tabDPHI,
indexL0,cn2,Ndphi,Nw,Nlayer,Nsubap,teldiam);
//printf("gx = %d, gy = %d ----- %.2f \n", gx, gy, data[0]);
}
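// Symmetry fix-up kernel: the generation kernel only fills entries with gx <= gy
// (except when type_mat == 3, where every entry is computed); this kernel copies
// the transposed element into the gx > gy half so the full matrix is available.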
__global__ void matcov_gpu_kernel_copy(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda)
{
/* *** covariance matrix generation kernel ***
* The kernel generates the element values in a given matrix/submatrix
* The generation function can be any function, as long as each element
* can be computed both individually and independently
*
* see argument description in the kernel driver
*/
// local thread coordinates w.r.t. thread block
const int tx_ = threadIdx.x;
const int ty_ = threadIdx.y;
// local thread block coordinates w.r.t. kernel grid
const int bx_ = blockIdx.x;
const int by_ = blockIdx.y;
// local coordinates of the element w.r.t. submatrix
int lx = bx_ * blockDim.x + tx_;
int ly = by_ * blockDim.y + ty_;
// global coordinates of the element w.r.t. the entire matrix
int gx = lx + xoffset;
int gy = ly + yoffset;
// out-of-bound threads should terminate
if( (lx >= nrows) || (ly >= ncols) ) return;
// Advance the data pointer accordingly
//data += ly * lda + lx;
if (gx > gy) {
// call the generation function
data[ly * lda + lx] = data[ly + lx * lda];
//printf("gx = %d, gy = %d ----- %.2f \n", gx, gy, data[0]);
}
}
//extern "C"
void matcov_gpu(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda, struct tomo_struct tomo)
{
/* *** matcov gpu kernel driver ***
* Arguments
* ==========
* data double pointer: A pointer to the matrix/submatrix to be generated. It
* should always point to the first element in a matrix/submatrix
*
* nrows integer: The number of rows of the matrix/submatrix to be generated
*
* ncols integer: The number of columns of the matrix/submatrix to be generated
*
* xoffset integer: The x-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the x-coordinate of the first element in the matrix/submatrix
*
* yoffset integer: The y-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the y-coordinate of the first element in the matrix/submatrix
*
* lda integer: The leading dimension of the matrix/submatrix
*/
cudaError_t e;
// %%%%%%% Pre-computation of DPHI %%%%%%%%%%
//Computes an array of DPHI (tabDPHI) for an array of subaperture distance rr for each DIFFERENT L0
const long Nw = tomo.Nw;
const long Nlayer = tomo.Nlayer;
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
long indexL0[Nlayer]; //link between index in L0 and index in L0diff
double *tabDPHI_d;
int Nl0_; // used to know the size of the array
tabDPHI_d = tabulateDPHI_gpu(tomo, Ndphi, indexL0, (int*)&Nl0_,convert);
long *indexL0_d;
//printf("sizeof indexL0 is %.2f KB\n", Nlayer*sizeof(long)/1024.0);
e = cudaMalloc((void**)&indexL0_d, Nlayer*sizeof(long));
process_error(e, "alloc gpu indexL0_d");
e = cudaMemcpy(indexL0_d, indexL0, Nlayer*sizeof(long), cudaMemcpyHostToDevice);
process_error(e, "copy gpu indexL0_d");
// %%%%%%% Computation of the sub-apertures positions and sizes %%%%%%%%%%%
// u, v :arrays containing all the sub-apertures coordinates of all WFS, one after the other
// u[0][1][3] is the X-coordinate of subap number 3 of wfs number 0 at altitude 3
double* u_d;
//printf("sizeof u is %.2f KB\n", Nlayer*tomo.Nsubap[0]*Nw*sizeof(double)/1024.0);
e = cudaMalloc((void**)&u_d, Nlayer*tomo.Nsubap[0]*Nw*sizeof(double));
process_error(e, "alloc gpu u_d");
double* v_d;
e = cudaMalloc((void**)&v_d, Nlayer*tomo.Nsubap[0]*Nw*sizeof(double));
process_error(e, "alloc gpu v_d");
//Computes u and v
subap_position_gpu(tomo, u_d, v_d);
double *sspSizeL = (double *)malloc(sizeof(double)*Nw*Nlayer);
for (int cc = 0; cc < Nw * Nlayer; cc++) {
int n = cc / Nlayer;
int l = cc - n * Nlayer;
sspSizeL[cc] = tomo.sspSize[n] * (1. - tomo.GsAlt[n] * tomo.h[l]);
}
double *sspSizeL_d;
//printf("sizeof sspSizeL is %.2f KB\n", Nw*Nlayer*sizeof(double)/1024.0);
e = cudaMalloc((void**)&sspSizeL_d, Nw*Nlayer*sizeof(double));
process_error(e, "alloc gpu sspSizeL_d");
e = cudaMemcpy(sspSizeL_d, sspSizeL, Nw*Nlayer*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu sspSizeL_d");
int *tab_wfs;
tab_wfs = (int*)malloc(nrows*sizeof(int));
int *tab_subap;
tab_subap = (int*)malloc(nrows*sizeof(int));
int *tab_xy;
tab_xy = (int*)malloc(nrows*sizeof(int));
long ts = Nw - 1;//Truth sensor : ts
int cpt = 0;
for (int cc=0;cc<Nw;cc++) {
if (cc != ts) {
int nslps = tomo.Nsubap[cc]*2;
for (int ccc=0;ccc<nslps;ccc++) {
if (cc > ts) tab_wfs[ccc+cpt] = cc - 1;
else tab_wfs[ccc+cpt] = cc;
if (ccc < nslps/2) {
tab_subap[ccc+cpt] = ccc;
tab_xy[ccc+cpt] = 0;
} else {
tab_subap[ccc+cpt] = ccc - nslps/2;
tab_xy[ccc+cpt] = 1;
}
}
cpt += nslps;
}
}
//for(int ah = 0; ah < nrows; ah++)
// printf("[%5d]: tab_wfs = %6d, tab_subap = %6d, tab_xy = %6d\n", ah, tab_wfs[ah], tab_subap[ah], tab_xy[ah]);
int *tab_wfs_d;
//printf("sizeof tab_wfs is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = cudaMalloc((void**)&tab_wfs_d, nrows*sizeof(int));
process_error(e, "alloc gpu tab_wfs_d");
e = cudaMemcpy(tab_wfs_d, tab_wfs, nrows*sizeof(int), cudaMemcpyHostToDevice);
process_error(e, "copy gpu tab_wfs_d");
int *tab_subap_d;
//printf("sizeof tab_subap is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = cudaMalloc((void**)&tab_subap_d, nrows*sizeof(int));
process_error(e, "alloc gpu tab_subap_d");
e = cudaMemcpy(tab_subap_d, tab_subap, nrows*sizeof(int), cudaMemcpyHostToDevice);
process_error(e, "copy gpu tab_subap_d");
int *tab_xy_d;
//printf("sizeof tab_xy is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = cudaMalloc((void**)&tab_xy_d, nrows*sizeof(int));
process_error(e, "alloc gpu tab_xy_d");
e = cudaMemcpy(tab_xy_d, tab_xy, nrows*sizeof(int), cudaMemcpyHostToDevice);
process_error(e, "copy gpu tab_xy_d");
double *cn2_d;
//printf("sizeof cn2_d is %.2f KB\n", Nlayer*sizeof(double)/1024.0);
e = cudaMalloc((void**)&cn2_d, Nlayer*sizeof(double));
process_error(e, "alloc gpu cn2_d");
e = cudaMemcpy(cn2_d, tomo.cn2, Nlayer*sizeof(double), cudaMemcpyHostToDevice);
process_error(e, "copy gpu cn2_d");
long *Nssp_d;
e = cudaMalloc((void**)&Nssp_d, Nw*sizeof(long));
process_error(e, "alloc gpu Nssp_d");
e = cudaMemcpy(Nssp_d, tomo.Nssp, Nw*sizeof(long), cudaMemcpyHostToDevice);
process_error(e, "copy gpu Nssp_d");
int nbx = nrows / matcov_thread_x + (nrows%matcov_thread_x != 0);
int nby = ncols / matcov_thread_y + (ncols%matcov_thread_y != 0);
dim3 dimBlock(matcov_thread_x, matcov_thread_y);
dim3 dimGrid(nbx, nby);
const long Nsubap = tomo.Nsubap[0];
int type_mat = tomo.part;
//printf("Nlayer = %d \n", Nlayer);
matcov_gpu_kernel<<<dimGrid, dimBlock>>>(data, nrows, ncols, xoffset, yoffset, lda, tab_wfs_d, tab_subap_d, tab_xy_d,
convert,sspSizeL_d,Nssp_d,u_d,v_d,pasDPHI,tabDPHI_d,indexL0_d,cn2_d,
Ndphi,Nw,Nlayer,Nsubap,type_mat,tomo.DiamTel);
matcov_gpu_kernel_copy<<<dimGrid, dimBlock>>>(data, nrows, ncols, xoffset, yoffset, lda);
if (sspSizeL) free(sspSizeL);
if (tab_wfs) free(tab_wfs);
if (tab_subap) free(tab_subap);
if (tab_xy) free(tab_xy);
if (sspSizeL_d) cudaFree(sspSizeL_d);
if (tab_wfs_d) cudaFree(tab_wfs_d);
if (tab_subap_d) cudaFree(tab_subap_d);
if (tab_xy_d) cudaFree(tab_xy_d);
if (indexL0_d) cudaFree(indexL0_d);
if (tabDPHI_d) cudaFree(tabDPHI_d);
if (cn2_d) cudaFree(cn2_d);
if (Nssp_d) cudaFree(Nssp_d);
if (u_d) cudaFree(u_d);
if (v_d) cudaFree(v_d);
}
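/* Tile-generation sketch (illustration only: the driver name `matcov_gpu`, the
 * device buffer `Cmm_d` and the matrix size N are assumptions, not part of this
 * file). Because every kernel thread recomputes its global coordinates from
 * (xoffset, yoffset), the covariance matrix can be produced in one call or tile
 * by tile, with lda kept equal to the leading dimension of the full allocation
 * and `data` pointing at the first element of the tile:
 *
 *   matcov_gpu(Cmm_d,                 N,   N,   0,   0,    N, tomo);  // whole matrix
 *   matcov_gpu(Cmm_d + 1024*N + 512, 256, 256, 512, 1024,  N, tomo);  // 256x256 tile at (512,1024)
 */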
//======================================================================================================
// V3
//======================================================================================================
void init_tomo_gpu(struct tomo_gpu_struct *tomo_gpu, struct tomo_struct tomo){
cudaError_t e;
e = cudaMalloc((void**)&(tomo_gpu->indexL0_d), tomo.Nlayer*sizeof(long));
process_error(e, "alloc gpu indexL0_d");
e = cudaMalloc((void**)&(tomo_gpu->u_d), tomo.Nlayer*tomo.Nsubap[0]*tomo.Nw*sizeof(double));
process_error(e, "alloc gpu u_d");
e = cudaMalloc((void**)&(tomo_gpu->v_d), tomo.Nlayer*tomo.Nsubap[0]*tomo.Nw*sizeof(double));
process_error(e, "alloc gpu v_d");
e = cudaMalloc((void**)&(tomo_gpu->sspSizeL_d), tomo.Nw*tomo.Nlayer*sizeof(double));
process_error(e, "alloc gpu sspSizeL_d");
e = cudaMalloc((void**)&(tomo_gpu->cn2_d), tomo.Nw*tomo.Nlayer*sizeof(double));
process_error(e, "alloc gpu cn2_d");
e = cudaMalloc((void**)&(tomo_gpu->h_d), tomo.Nlayer*sizeof(double));
process_error(e, "alloc gpu h_d");
e = cudaMalloc((void**)&(tomo_gpu->Nssp_d), tomo.Nw*sizeof(long));
process_error(e, "alloc gpu Nssp_d");
e = cudaMalloc((void**)&(tomo_gpu->ioff_d), tomo.Nw*sizeof(long));
process_error(e, "alloc gpu ioff_d");
e = cudaMalloc((void**)&(tomo_gpu->alphaX_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu alphaX_d");
e = cudaMalloc((void**)&(tomo_gpu->alphaY_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu alphaY_d");
e = cudaMalloc((void**)&(tomo_gpu->GsAlt_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu GsAlt_d");
e = cudaMalloc((void**)&(tomo_gpu->diamPup_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu diamPup_d");
e = cudaMalloc((void**)&(tomo_gpu->thetaML_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu thetaML_d");
e = cudaMalloc((void**)&(tomo_gpu->X_d), tomo.Nx*sizeof(double));
process_error(e, "alloc gpu X_d");
e = cudaMalloc((void**)&(tomo_gpu->Y_d), tomo.Nx*sizeof(double));
process_error(e, "alloc gpu Y_d");
e = cudaMalloc((void**)&(tomo_gpu->XPup_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu XPup_d");
e = cudaMalloc((void**)&(tomo_gpu->YPup_d), tomo.Nw*sizeof(double));
process_error(e, "alloc gpu YPup_d");
//printf("dims : %d %d %d\n",tomo.Nsubap[tomo.Nw-1],tomo.Nsubap[0],tomo.Nx);
/*
e = cudaMalloc((void**)&(tomo_gpu->Cmm_d), tomo.Nw*tomo.Nsubap[0]*2*tomo.Nw*tomo.Nsubap[0]*2*sizeof(double));
process_error(e, "alloc gpu YPup_d");
e = cudaMalloc((void**)&(tomo_gpu->Cpm_d), tomo.Nsubap[0]*2*tomo.Nw*tomo.Nsubap[0]*2*sizeof(double));
process_error(e, "alloc gpu YPup_d");
e = cudaMalloc((void**)&(tomo_gpu->R_d), tomo.Nsubap[0]*2*tomo.Nw*tomo.Nsubap[0]*2*sizeof(double));
process_error(e, "alloc gpu YPup_d");
*/
tomo_gpu->L0diff_d = NULL;
tomo_gpu->tabDPHI_d = NULL;
e = cudaStreamCreate(&(tomo_gpu->matcov_stream));
process_error(e, "create matcov stream");
}
void free_tomo_gpu(struct tomo_gpu_struct *tomo_gpu){
cudaError_t e;
if (tomo_gpu->u_d) e = cudaFree(tomo_gpu->u_d);
process_error(e, "free gpu u_d");
if (tomo_gpu->v_d) e = cudaFree(tomo_gpu->v_d);
process_error(e, "free gpu v_d");
if (tomo_gpu->sspSizeL_d) e = cudaFree(tomo_gpu->sspSizeL_d) ;
process_error(e, "free gpu sspSizeL_d");
if (tomo_gpu->cn2_d) e = cudaFree(tomo_gpu->cn2_d);
process_error(e, "free gpu cn2_d");
if (tomo_gpu->h_d) e = cudaFree(tomo_gpu->h_d);
process_error(e, "free gpu h_d");
if (tomo_gpu->indexL0_d) e = cudaFree(tomo_gpu->indexL0_d);
process_error(e, "free gpu indexL0_d");
if (tomo_gpu->Nssp_d) e = cudaFree(tomo_gpu->Nssp_d);
process_error(e, "free gpu Nssp_d");
if (tomo_gpu->ioff_d) e = cudaFree(tomo_gpu->ioff_d);
process_error(e, "free gpu ioff_d");
if (tomo_gpu->alphaX_d) e = cudaFree(tomo_gpu->alphaX_d);
process_error(e, "free gpu alphaX_d");
if (tomo_gpu->alphaY_d) e = cudaFree(tomo_gpu->alphaY_d);
process_error(e, "free gpu alphaY_d");
if (tomo_gpu->GsAlt_d) e = cudaFree(tomo_gpu->GsAlt_d);
process_error(e, "free gpu GsAlt_d");
if (tomo_gpu->diamPup_d) e = cudaFree(tomo_gpu->diamPup_d);
process_error(e, "free gpu diamPup_d");
if (tomo_gpu->thetaML_d) e = cudaFree(tomo_gpu->thetaML_d);
process_error(e, "free gpu thetaML_d");
if (tomo_gpu->X_d) e = cudaFree(tomo_gpu->X_d);
process_error(e, "free gpu X_d");
if (tomo_gpu->Y_d) e = cudaFree(tomo_gpu->Y_d);
process_error(e, "free gpu Y_d");
if (tomo_gpu->XPup_d) e = cudaFree(tomo_gpu->XPup_d);
process_error(e, "free gpu XPup_d");
if (tomo_gpu->YPup_d) e = cudaFree(tomo_gpu->YPup_d);
process_error(e, "free gpu YPup_d");
/*
if (tomo_gpu->Cmm_d) e = cudaFree(tomo_gpu->Cmm_d);
process_error(e, "free gpu YPup_d");
if (tomo_gpu->Cpm_d) e = cudaFree(tomo_gpu->Cpm_d);
process_error(e, "free gpu YPup_d");
if (tomo_gpu->R_d) e = cudaFree(tomo_gpu->R_d);
process_error(e, "free gpu YPup_d");
*/
if ((tomo_gpu->tabDPHI_d) != NULL) e = cudaFree(tomo_gpu->tabDPHI_d);
process_error(e, "free gpu tabDPHI_d");
if ((tomo_gpu->L0diff_d) != NULL) e = cudaFree(tomo_gpu->L0diff_d);
process_error(e, "free gpu L0diff_d");
// destroy matcov stream
e = cudaStreamDestroy(tomo_gpu->matcov_stream);
process_error(e, "destroy matcov stream");
}
//------------------------------------------------------------------------------------
void tab_dphi_gpu(double *tab_dphi, struct tomo_struct tomo, struct tomo_gpu_struct *tomo_gpu, long Ndphi, double *L0diff_d, int Nl0, double convert)
//void tabulateDPHI_gpu(double* tabDPHI_d, double* rr_d,struct tomo_struct tomo, long Ndphi, long *indexL0_h)
/* DOCUMENT tabDPHI = tabulateDPHI(rr,tomo,Ndphi, indexL0)
<tomo> : structure with all the needed information
<Ndphi> : size of rr
<indexL0> : link between the index of the studied layer and the index of the precomputed one.
Computes the phase structure function for a separation rr(x,y).
The r0 is not taken into account : the final result of DPHI(x,y,L0)
has to be scaled with r0^-5/3, with r0 expressed in meters, to get
the right value.
Computes the phase structure for each different L0 and gives an array (indexL0) to link the index of the layer i and the index of tabDPHI: for the layer l, DPHI = DPHI(du, dv, indexL0[l], rr, tabDPHI, convert).
SEE ALSO: DPHI
*/
{
// Assume one thread per element
int nblocks = (Ndphi*Nl0)/tabDPHI_thread_x + ( ((Ndphi*Nl0)%tabDPHI_thread_x) != 0);
dim3 dimBlock(tabDPHI_thread_x, 1);
dim3 dimGrid(nblocks, 1);
tabulateDPHI_gpu_kernel<<<dimGrid, dimBlock, 0, tomo_gpu->matcov_stream>>>(tab_dphi, L0diff_d, Nl0, Ndphi, convert);
CudaCheckError();
}
//------------------------------------------------------------------------------------
//extern "C"
void sub_pos_gpu(struct tomo_gpu_struct *tomo_gpu, struct tomo_struct tomo)
//void subap_position_gpu(struct tomo_struct tomo, double ***u, double ***v)
/* DOCUMENT DOCUMENT subap_position(tomo, u, v)
<tomo> : structure with all the needed information.
<u> and <v> : 3d arrays containing the sub-apertures projected coordinates onto all the layers. u[0][2][1] is the X-coordinate of the subap 2 of the WFS 0 on the layer 1.
Computes the coordinates of all subapertures projected onto all the layers
*/
{
int msize = tomo.Nlayer * tomo.Nw * tomo.Nsubap[0];
int nblocks = msize / tabDPHI_thread_x + ( ( msize % tabDPHI_thread_x) != 0);
dim3 dimBlock(tabDPHI_thread_x, 1);
dim3 dimGrid(nblocks, 1);
subposition_gpu_kernel<<<dimGrid, dimBlock, 0, tomo_gpu->matcov_stream>>>(tomo.Nw, tomo.Nsubap[0], tomo.Nlayer, tomo_gpu->alphaX_d,
tomo_gpu->alphaY_d,tomo_gpu->h_d, tomo_gpu->GsAlt_d,
tomo_gpu->Nssp_d, tomo_gpu->diamPup_d, tomo_gpu->thetaML_d,
tomo_gpu->ioff_d, tomo_gpu->X_d, tomo_gpu->Y_d,
tomo_gpu->XPup_d, tomo_gpu->YPup_d, tomo_gpu->u_d, tomo_gpu->v_d);
CudaCheckError();
}
void update_tomo_atm(struct tomo_gpu_struct *tomo_gpu,struct tomo_struct tomo) {
cudaError_t e;
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
e = cudaMemcpyAsync(tomo_gpu->h_d, tomo.h, tomo.Nlayer*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu h_d");
e = cudaMemcpyAsync(tomo_gpu->cn2_d, tomo.cn2, tomo.Nlayer*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu cn2_d");
double *sspSizeL = (double *)malloc(sizeof(double)*tomo.Nw*tomo.Nlayer);
for (int cc = 0; cc < tomo.Nw * tomo.Nlayer; cc++) {
int n = cc / tomo.Nlayer;
int l = cc - n * tomo.Nlayer;
sspSizeL[cc] = tomo.sspSize[n] * (1. - tomo.GsAlt[n] * tomo.h[l]);
}
e = cudaMemcpyAsync(tomo_gpu->sspSizeL_d, sspSizeL, tomo.Nw*tomo.Nlayer*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu sspSizeL_d");
//Search the different L0 and build indexL0
const long Nlayer = tomo.Nlayer;
long i, j;
int cpt = 1;
double tmp[Nlayer];
long indexL0[Nlayer];
tmp[0] = tomo.L0[0];
indexL0[0] = 0;
for (i = 1; i < Nlayer; i++) {
j = 0;
const double l0 = tomo.L0[i];
while ((j < cpt) && (tmp[j] != l0)) {j++;}
indexL0[i] = j;
if (j == cpt) {
tmp[j] = l0;
cpt++;
}
}
e = cudaMemcpyAsync((tomo_gpu->indexL0_d), indexL0, tomo.Nlayer*sizeof(long), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu indexL0_d");
tomo_gpu->Nl0 = cpt;
double L0diff[tomo_gpu->Nl0];
// allocate space for L0
if ((tomo_gpu->L0diff_d) != NULL){cudaFree(tomo_gpu->L0diff_d);}
e = cudaMalloc((void**)&(tomo_gpu->L0diff_d), tomo_gpu->Nl0*sizeof(double));
process_error(e, "alloc gpu L0diff_d");
for (i = 0; i < tomo_gpu->Nl0; i++) {
L0diff[i] = tmp[i];
}
// offload L0diff
e = cudaMemcpyAsync(tomo_gpu->L0diff_d, L0diff, tomo_gpu->Nl0*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "offload L0diff");
// precompute DPHI: only once for each different L0
if ((tomo_gpu->tabDPHI_d) != NULL){cudaFree(tomo_gpu->tabDPHI_d);}
e = cudaMalloc((void**)&(tomo_gpu->tabDPHI_d), tomo_gpu->Nl0*Ndphi*sizeof(double));
process_error(e, "alloc gpu tabDPHI_d");
tab_dphi_gpu(tomo_gpu->tabDPHI_d, tomo, tomo_gpu, Ndphi, tomo_gpu->L0diff_d, tomo_gpu->Nl0,convert);
// %%%%%%% Computation of the sub-apertures positions and sizes %%%%%%%%%%%
// u, v :arrays containing all the sub-apertures coordinates of all WFS, one after the other
// u[0][1][3] is the X-coordinate of subap number 3 of wfs number 0 at altitude 3
//Computes u and v
sub_pos_gpu(tomo_gpu, tomo);
if (sspSizeL) free(sspSizeL);
cudaStreamSynchronize(tomo_gpu->matcov_stream);
}
void update_tomo_sys(struct tomo_gpu_struct *tomo_gpu,struct tomo_struct tomo) {
cudaError_t e;
long ioff[tomo.Nw];
ioff[0] = 0;
for (int i=1;i<tomo.Nw;i++) ioff[i] = ioff[i-1] + tomo.Nsubap[i-1];
e = cudaMemcpyAsync(tomo_gpu->ioff_d, ioff, tomo.Nw*sizeof(long), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu ioff_d");
e = cudaMemcpyAsync(tomo_gpu->alphaX_d, tomo.alphaX, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu alphaX_d");
e = cudaMemcpyAsync(tomo_gpu->alphaY_d, tomo.alphaY, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu alphaY_d");
e = cudaMemcpyAsync(tomo_gpu->GsAlt_d, tomo.GsAlt, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu GsAlt_d");
e = cudaMemcpyAsync(tomo_gpu->Nssp_d, tomo.Nssp, tomo.Nw*sizeof(long), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu Nssp_d");
e = cudaMemcpyAsync(tomo_gpu->diamPup_d, tomo.diamPup, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu diamPup_d");
e = cudaMemcpyAsync(tomo_gpu->XPup_d, tomo.XPup, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu XPup_d");
e = cudaMemcpyAsync(tomo_gpu->YPup_d, tomo.YPup, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu YPup_d");
e = cudaMemcpyAsync(tomo_gpu->thetaML_d, tomo.thetaML, tomo.Nw*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu thetaML_d");
e = cudaMemcpyAsync(tomo_gpu->X_d, tomo.X, tomo.Nx*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu X_d");
e = cudaMemcpyAsync(tomo_gpu->Y_d, tomo.Y, tomo.Nx*sizeof(double), cudaMemcpyHostToDevice, tomo_gpu->matcov_stream);
process_error(e, "copy gpu Y_d");
cudaStreamSynchronize(tomo_gpu->matcov_stream);
}
//extern "C"
void matcov_gpu3(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda, struct tomo_struct tomo,
struct tomo_gpu_struct *tomo_gpu)
{
/* *** matcov gpu kernel driver ***
* Arguments
* ==========
* data double pointer: A pointer to the matrix/submatrix to be generated. It
* should always point to the first element in a matrix/submatrix
*
* nrows integer: The number of rows of the matrix/submatrix to be generated
*
* ncols integer: The number of columns of the matrix/submatrix to be generated
*
* xoffset integer: The x-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the x-coordinate of the first element in the matrix/submatrix
*
* yoffset integer: The y-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the y-coordinate of the first element in the matrix/submatrix
*
* lda integer: The leading dimension of the matrix/submatrix
*/
cudaError_t e;
// %%%%%%% Pre-computation of DPHI %%%%%%%%%%
//Computes an array of DPHI (tabDPHI) for an array of subaperture distance rr for each DIFFERENT L0
const long Nw = tomo.Nw;
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
int type_mat = tomo.part;
int size = tomo.Nslopes - 2 * tomo.Nsubap[tomo.Nw-1];
int *tab_wfs;
tab_wfs = (int*)malloc(size*sizeof(int));
int *tab_subap;
tab_subap = (int*)malloc(size*sizeof(int));
int *tab_xy;
tab_xy = (int*)malloc(size*sizeof(int));
long ts = Nw - 1;//Truth sensor : ts
int cpt = 0;
for (int cc=0;cc<Nw;cc++) {
if (cc != ts) {
int nslps = tomo.Nsubap[cc]*2;
for (int ccc=0;ccc<nslps;ccc++) {
if (cc > ts) tab_wfs[ccc+cpt] = cc - 1;
else tab_wfs[ccc+cpt] = cc;
if (ccc < nslps/2) {
tab_subap[ccc+cpt] = ccc;
tab_xy[ccc+cpt] = 0;
} else {
tab_subap[ccc+cpt] = ccc - nslps/2;
tab_xy[ccc+cpt] = 1;
}
}
cpt += nslps;
}
}
int *tab_wfs_d;
//printf("sizeof tab_wfs is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = cudaMalloc((void**)&tab_wfs_d, size*sizeof(int));
process_error(e, "alloc gpu tab_wfs_d");
e = cudaMemcpy(tab_wfs_d, tab_wfs, size*sizeof(int), cudaMemcpyHostToDevice);
process_error(e, "copy gpu tab_wfs_d");
int *tab_subap_d;
//printf("sizeof tab_subap is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = cudaMalloc((void**)&tab_subap_d, size*sizeof(int));
process_error(e, "alloc gpu tab_subap_d");
e = cudaMemcpy(tab_subap_d, tab_subap, size*sizeof(int), cudaMemcpyHostToDevice);
process_error(e, "copy gpu tab_subap_d");
int *tab_xy_d;
//printf("sizeof tab_xy is %.2f KB\n", nrows*sizeof(int)/1024.0);
e = cudaMalloc((void**)&tab_xy_d, size*sizeof(int));
process_error(e, "alloc gpu tab_xy_d");
e = cudaMemcpy(tab_xy_d, tab_xy, size*sizeof(int), cudaMemcpyHostToDevice);
process_error(e, "copy gpu tab_xy_d");
int nbx = nrows / matcov_thread_x + (nrows%matcov_thread_x != 0);
int nby = ncols / matcov_thread_y + (ncols%matcov_thread_y != 0);
dim3 dimBlock(matcov_thread_x, matcov_thread_y);
dim3 dimGrid(nbx, nby);
const long Nsubap = tomo.Nsubap[0];
matcov_gpu_kernel<<<dimGrid, dimBlock, 0, tomo_gpu->matcov_stream>>>(data, nrows, ncols, xoffset, yoffset, lda, tab_wfs_d, tab_subap_d, tab_xy_d,
convert,tomo_gpu->sspSizeL_d,tomo_gpu->Nssp_d,tomo_gpu->u_d,tomo_gpu->v_d,
pasDPHI,tomo_gpu->tabDPHI_d,tomo_gpu->indexL0_d,tomo_gpu->cn2_d,
Ndphi,tomo.Nw,tomo.Nlayer,Nsubap,type_mat,tomo.DiamTel);
if (type_mat == 1)
matcov_gpu_kernel_copy<<<dimGrid, dimBlock>>>(data, nrows, ncols, xoffset, yoffset, lda);
cudaStreamSynchronize(tomo_gpu->matcov_stream);
if (tab_wfs) free(tab_wfs);
if (tab_subap) free(tab_subap);
if (tab_xy) free(tab_xy);
if (tab_wfs_d) cudaFree(tab_wfs_d);
if (tab_subap_d) cudaFree(tab_subap_d);
if (tab_xy_d) cudaFree(tab_xy_d);
}
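/* Minimal end-to-end usage sketch of the V3 interface (illustration only: the
 * helper name, the measurement-count formula reused from matcov_gpu3 above and
 * the pre-allocated device buffer Cmm_d are assumptions, not part of the
 * original code). */
void example_matcov_v3(struct tomo_struct tomo, double *Cmm_d)
{
  struct tomo_gpu_struct tomo_gpu;
  init_tomo_gpu(&tomo_gpu, tomo);    // allocate the persistent device arrays and the stream
  update_tomo_sys(&tomo_gpu, tomo);  // upload the system geometry (pupil, directions, ...)
  update_tomo_atm(&tomo_gpu, tomo);  // upload the cn2/L0 profile and tabulate DPHI
  // number of measurements excluding the truth sensor, as used above to size tab_wfs
  int nmeas = tomo.Nslopes - 2 * tomo.Nsubap[tomo.Nw - 1];
  // full covariance matrix: zero offsets, lda equal to the number of rows
  matcov_gpu3(Cmm_d, nmeas, nmeas, 0, 0, nmeas, tomo, &tomo_gpu);
  free_tomo_gpu(&tomo_gpu);          // release the device arrays and the stream
}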
void matts_gpu(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda, struct tomo_struct tomo,
struct tomo_gpu_struct *tomo_gpu)
{
/* *** matcov gpu kernel driver ***
* Arguments
* ==========
* data double pointer: A pointer to the matrix/submatrix to be generated. It
* should always point to the first element in a matrix/submatrix
*
* nrows integer: The number of rows of the matrix/submatrix to be generated
*
* ncols integer: The number of columns of the matrix/submatrix to be generated
*
* xoffset integer: The x-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the x-coordinate of the first element in the matrix/submatrix
*
* yoffset integer: The y-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the y-coordinate of the first element in the matrix/submatrix
*
* lda integer: The leading dimension of the matrix/submatrix
*/
const long Nw = tomo.Nw;
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
int nbx = nrows / matcov_thread_x + (nrows%matcov_thread_x != 0);
int nby = ncols / matcov_thread_y + (ncols%matcov_thread_y != 0);
dim3 dimBlock(matcov_thread_x, matcov_thread_y);
dim3 dimGrid(nbx, nby);
const long Nsubap = tomo.Nsubap[Nw-1];
matts_gpu_kernel<<<dimGrid, dimBlock>>>(data, nrows, ncols, xoffset, yoffset, lda,
convert,tomo_gpu->X_d,tomo_gpu->Y_d,tomo_gpu->Nssp_d,
pasDPHI,tomo_gpu->tabDPHI_d,tomo_gpu->indexL0_d,tomo_gpu->cn2_d,
Ndphi,tomo.Nw,tomo.Nlayer,Nsubap,tomo.DiamTel);
CudaCheckError();
}
__device__ double compute_element_noise(int ipos, int jpos, double convert, double *sspSizeL, long *Nssp, double *u, double *v,
double pasDPHI, double *tabDPHI, long *indexL0, double *cn2, int Ndphi, int Nw, int Nlayer,
int Nsubap, double *alphaX, double *alphaY, double lgs_cst, double noise_var, double spotWidth,
double dH_lgs, double alt_lgs, int type_mat, int nlgs, double teldiam)
{
/* *** Covariance matrix per-element generation ***
* Arguments
* =========
* ipos: Integer: global x-coordinate of the element w.r.t. the entire matrix
* jpos: Integer: global y-coordinate of the element w.r.t. the entire matrix
*/
// for now return a dummy value
const double lambda2 = 0.00026942094446267851;
//WFS m
int m = ipos / (2 * Nsubap);
if (type_mat == 3) m = Nw-1;
//WFS n
int n = jpos / (2 * Nsubap);
if (type_mat == 2) n = Nw-1;
//subap i
int i = ipos % (2 * Nsubap);
//subap j
int j = jpos % (2 * Nsubap);
//xy i
int xy_i;
//xy j
int xy_j;
if (i>=Nsubap) {
i-= Nsubap;
xy_i = 1;
} else xy_i = 0;
if (j>=Nsubap) {
j-= Nsubap;
xy_j = 1;
} else xy_j = 0;
const double sspSizem = teldiam / Nssp[m];
const double sspSizen = teldiam / Nssp[n];
const double kk = lambda2 / (sspSizem * sspSizen);
int type = xy_i * 2 + xy_j;
//Layer l
double covar = 0.0;
#pragma unroll
for (int l = 0; l < Nlayer; l++)
{
const double sspSizeml = sspSizeL[m * Nlayer + l];
const double sspSizenl = sspSizeL[n * Nlayer + l];
//test if the altitude layers is not higher than the LGS altitude
if ((sspSizeml > 0) && (sspSizenl > 0))
{
const int pos1 = m + i * Nw + l * Nw * Nsubap;
const int pos2 = n + j * Nw + l * Nw * Nsubap;
const double du = u[pos1] - u[pos2];
const double dv = v[pos1] - v[pos2];
const double s1 = sspSizeml * 0.5;
const double s2 = sspSizenl * 0.5;
const double ac = s1 - s2;
const double ad = s1 + s2;
const double bc = -ad; // initially -s1-s2;
const double bd = -ac; // initially -s1+s2;
if (type == 0) covar += 0.5 * pasDPHI * cov_XX(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l];
else if (type == 3) covar += 0.5 * pasDPHI * cov_YY(du,dv,ac,ad,bc,bd,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l];
else //if ((type == 1) || (type == 2))
{
const double s0 = sqrt(s1 * s1 + s2 * s2); //half size of the subaperture equivalent to a convolution by s1 and s2
const double dd = (s1 > s2) ? 1. - s2 / s1 : 1. - s1 / s2; // Nono's style ....
covar += 0.25 * pasDPHI * cov_XY(du,dv,s0,tabDPHI,indexL0[l],convert,Ndphi) * kk * cn2[l] * (1. - dd * dd);
}
}
}
// adding noise
if (m == n) {
if (m < nlgs) {
if (i == j) {
// lgs case
const int pos1 = m + i * Nw;
double x = u[pos1];
double y = v[pos1];
const double xwfs = alphaX[m] * 206265;
const double ywfs = alphaY[m] * 206265;
double lltx = 0;
double llty = 0;
const double lltnorm = sqrtf(xwfs*xwfs + ywfs*ywfs);
if (lltnorm != 0) {
lltx = xwfs / lltnorm * teldiam / 2.0;
llty = ywfs / lltnorm * teldiam / 2.0;
}
x -= lltx;
y -= llty;
x = 206265. * dH_lgs * x / alt_lgs / alt_lgs; // extension at Fwhm, in arcsec
y = 206265. * dH_lgs * y / alt_lgs / alt_lgs; // extension at Fwhm, in arcsec
const double lgsExt = sqrtf(x * x + y * y); // length of the extension
const double lgsTheta = x != 0 ? atanf( y / x) : 0.0; // angle of extension
const double totalExt = sqrtf( lgsExt * lgsExt + spotWidth * spotWidth);
// length of the extension including seeing, laser size, ...
const double ratio = totalExt / spotWidth;
const double noiseLongAxis = noise_var * ratio * ratio;
if (type == 0) covar += noiseLongAxis * cosf(lgsTheta) * cosf(lgsTheta) +
noise_var * sinf(lgsTheta) * sinf(lgsTheta);
else if (type == 3) covar += noiseLongAxis * sinf(lgsTheta) * sinf(lgsTheta) +
noise_var * cosf(lgsTheta) * cosf(lgsTheta);
else covar += (noiseLongAxis-noise_var) * sinf(lgsTheta) * cosf(lgsTheta);
}
if ((type == 0) || (type == 3))
covar += lgs_cst;
} else {
// ngs case
if (i==j) {
if ((type == 0) || (type == 3)) {
covar += noise_var;
}
}
}
}
return (double)covar;
}
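/* Worked index example (illustration only): with Nsubap = 100 subapertures per
 * WFS, ipos = 250 decodes to m = 250/200 = 1 (WFS 1), i = 250 % 200 = 50 and
 * xy_i = 0, i.e. the X slope of subaperture 50 of WFS 1; ipos = 350 decodes to
 * the same WFS and subaperture but xy_i = 1 (its Y slope). jpos is decoded the
 * same way, so each matrix element is the covariance of two slope measurements. */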
__global__ void matcovnoise_gpu_kernel(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda,
double convert, double *sspSizeL, long *Nssp, double *u, double *v,
double pasDPHI, double *tabDPHI, long *indexL0, double *cn2, int Ndphi, int Nw, int Nlayer,
int Nsubap, double *alphaX, double *alphaY, double lgs_cst, double noise_var, double spotWidth,
double dH_lgs, double alt_lgs, int type_mat, int nlgs, double teldiam)
{
/* *** covariance matrix generation kernel ***
* The kernel generates the element values in a given matrix/submatrix
* The generation function can be any function, as long as each element
* can be computed both individually and independently
*
* see argument description in the kernel driver
*/
// local thread coordinates w.r.t. thread block
const int tx_ = threadIdx.x;
const int ty_ = threadIdx.y;
// local thread block coordinates w.r.t. kernel grid
const int bx_ = blockIdx.x;
const int by_ = blockIdx.y;
// local coordinates of the element w.r.t. submatrix
int lx = bx_ * blockDim.x + tx_;
int ly = by_ * blockDim.y + ty_;
// global coordinates of the element w.r.t. the entire matrix
int gx = lx + xoffset;
int gy = ly + yoffset;
// out-of-bound threads should terminate
if( (lx >= nrows) || (ly >= ncols) ) return;
// Advance the data pointer accordingly
data += ly * lda + lx;
if ((type_mat == 3) || (gx <= gy)) {
// call the generation function
data[0] = compute_element_noise(gx, gy, convert, sspSizeL, Nssp, u, v, pasDPHI, tabDPHI, indexL0, cn2, Ndphi, Nw, Nlayer,
Nsubap, alphaX, alphaY, lgs_cst, noise_var, spotWidth, dH_lgs, alt_lgs, type_mat, nlgs, teldiam);
//printf("gx = %d, gy = %d ----- %.2f \n", gx, gy, data[0]);
}
}
void matcov_gpu4(double* data, int nrows, int ncols, int xoffset, int yoffset, int lda, struct tomo_struct tomo,
struct tomo_gpu_struct *tomo_gpu)
{
/* *** matcov gpu kernel driver ***
* Arguments
* ==========
* data double pointer: A pointer to the matrix/submatrix to be generated. It
* should always point to the first element in a matrix/submatrix
*
* nrows integer: The number of rows of the matrix/submatrix to be generated
*
* ncols integer: The number of columns of the matrix/submatrix to be generated
*
* xoffset integer: The x-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the x-coordinate of the first element in the matrix/submatrix
*
* yoffset integer: The y-offset of the submatrix, must be zero if the entire matrix
* is generated. It's the y-coordinate of the first element in the matrix/submatrix
*
* lda integer: The leading dimension of the matrix/submatrix
*/
// %%%%%%% Pre-computation of DPHI %%%%%%%%%%
//Computes an array of DPHI (tabDPHI) for an array of subaperture distance rr for each DIFFERENT L0
const double crmax = tomo.rmax;
const double pasDPHI = 1./tomo.pasDPHI; // inverse of the rr step
const long Ndphi = floor(crmax*pasDPHI)+1;
const double convert = (double)(Ndphi-1)/(crmax+1./pasDPHI);
int type_mat = tomo.part;
int nbx = nrows / matcov_thread_x + (nrows%matcov_thread_x != 0);
int nby = ncols / matcov_thread_y + (ncols%matcov_thread_y != 0);
dim3 dimBlock(matcov_thread_x, matcov_thread_y);
dim3 dimGrid(nbx, nby);
const long Nsubap = tomo.Nsubap[0];
matcovnoise_gpu_kernel<<<dimGrid, dimBlock>>>(data, nrows, ncols, xoffset, yoffset, lda, convert, tomo_gpu->sspSizeL_d,
tomo_gpu->Nssp_d, tomo_gpu->u_d, tomo_gpu->v_d, pasDPHI, tomo_gpu->tabDPHI_d,
tomo_gpu->indexL0_d, tomo_gpu->cn2_d, Ndphi, tomo.Nw, tomo.Nlayer,
Nsubap, tomo_gpu->alphaX_d, tomo_gpu->alphaY_d, tomo.lgs_cst, tomo.noise_var,
tomo.spot_width, tomo.lgs_depth, tomo.lgs_alt, type_mat, tomo.nlgs, tomo.DiamTel);
if (type_mat == 1)
matcov_gpu_kernel_copy<<<dimGrid, dimBlock>>>(data, nrows, ncols, xoffset, yoffset, lda);
}
|
c1be623d48b6acc490a2364dec76c2b243abc692.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// auto-generated by op2.py
//
//global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ double gam_cuda;
__constant__ double gm1_cuda;
__constant__ double cfl_cuda;
__constant__ double eps_cuda;
__constant__ double mach_cuda;
__constant__ double alpha_cuda;
__constant__ double qinf_cuda[4];
//header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!OP_hybrid_gpu) return;
if (!strcmp(name,"gam")) {
cutilSafeCall(hipMemcpyToSymbol(gam_cuda, dat, dim*size));
}
else
if (!strcmp(name,"gm1")) {
cutilSafeCall(hipMemcpyToSymbol(gm1_cuda, dat, dim*size));
}
else
if (!strcmp(name,"cfl")) {
cutilSafeCall(hipMemcpyToSymbol(cfl_cuda, dat, dim*size));
}
else
if (!strcmp(name,"eps")) {
cutilSafeCall(hipMemcpyToSymbol(eps_cuda, dat, dim*size));
}
else
if (!strcmp(name,"mach")) {
cutilSafeCall(hipMemcpyToSymbol(mach_cuda, dat, dim*size));
}
else
if (!strcmp(name,"alpha")) {
cutilSafeCall(hipMemcpyToSymbol(alpha_cuda, dat, dim*size));
}
else
if (!strcmp(name,"qinf")) {
cutilSafeCall(hipMemcpyToSymbol(qinf_cuda, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
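// Example host-side call (illustration only; the host variable `gam` is an
// assumption, mirroring the constant names handled above):
//   double gam = 1.4;
//   op_decl_const_char(1, "double", sizeof(double), (char*)&gam, "gam");
// This copies the host value into the __constant__ symbol gam_cuda on the device.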
//user kernel files
#include "save_soln_kernel.cu"
#include "adt_calc_kernel.hip"
#include "res_calc_kernel.cu"
#include "bres_calc_kernel.cu"
#include "update_kernel.hip"
|
c1be623d48b6acc490a2364dec76c2b243abc692.cu
|
//
// auto-generated by op2.py
//
//global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ double gam_cuda;
__constant__ double gm1_cuda;
__constant__ double cfl_cuda;
__constant__ double eps_cuda;
__constant__ double mach_cuda;
__constant__ double alpha_cuda;
__constant__ double qinf_cuda[4];
//header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!OP_hybrid_gpu) return;
if (!strcmp(name,"gam")) {
cutilSafeCall(cudaMemcpyToSymbol(gam_cuda, dat, dim*size));
}
else
if (!strcmp(name,"gm1")) {
cutilSafeCall(cudaMemcpyToSymbol(gm1_cuda, dat, dim*size));
}
else
if (!strcmp(name,"cfl")) {
cutilSafeCall(cudaMemcpyToSymbol(cfl_cuda, dat, dim*size));
}
else
if (!strcmp(name,"eps")) {
cutilSafeCall(cudaMemcpyToSymbol(eps_cuda, dat, dim*size));
}
else
if (!strcmp(name,"mach")) {
cutilSafeCall(cudaMemcpyToSymbol(mach_cuda, dat, dim*size));
}
else
if (!strcmp(name,"alpha")) {
cutilSafeCall(cudaMemcpyToSymbol(alpha_cuda, dat, dim*size));
}
else
if (!strcmp(name,"qinf")) {
cutilSafeCall(cudaMemcpyToSymbol(qinf_cuda, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "save_soln_kernel.cu"
#include "adt_calc_kernel.cu"
#include "res_calc_kernel.cu"
#include "bres_calc_kernel.cu"
#include "update_kernel.cu"
|
b1f1cae984e116c5f901842f8983796f278f26f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "conv_1d.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int m = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
conv_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n,m);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
conv_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n,m);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
conv_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n,m);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
b1f1cae984e116c5f901842f8983796f278f26f3.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "conv_1d.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int m = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
conv_1d<<<gridBlock,threadBlock>>>(a,b,c,n,m);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
conv_1d<<<gridBlock,threadBlock>>>(a,b,c,n,m);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
conv_1d<<<gridBlock,threadBlock>>>(a,b,c,n,m);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ed609ff27d0ab803ce390a4e324defc91f4b129e.hip
|
// !!! This is a file automatically generated by hipify!!!
//======================================
// Batch normalization layer data
// GPU control
//======================================
#include"stdafx.h"
#include"BatchNormalizationAll_LayerData_GPU.cuh"
#include"BatchNormalizationAll_FUNC.hpp"
#include"BatchNormalizationAll_GPU.cuh"
#include"Library/NeuralNetwork/Optimizer.h"
#include"../_LayerBase/CLayerBase_GPU.cuh"
using namespace Gravisbell;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
//===========================
// Constructor / Destructor
//===========================
/** Constructor */
BatchNormalizationAll_LayerData_GPU::BatchNormalizationAll_LayerData_GPU(const Gravisbell::GUID& guid)
: BatchNormalizationAll_LayerData_Base(guid)
{
}
/** Destructor */
BatchNormalizationAll_LayerData_GPU::~BatchNormalizationAll_LayerData_GPU()
{
}
//===========================
// Initialization
//===========================
/** Initialize. Randomly initializes each neuron value.
@return 0 on success */
ErrorCode BatchNormalizationAll_LayerData_GPU::Initialize(void)
{
this->lpMean.resize(1);
this->lpVariance.resize(1);
this->lpScale.resize(1);
this->lpBias.resize(1);
for(U32 ch=0; ch<1; ch++)
{
this->lpMean[ch] = 0.0f;
this->lpVariance[ch] = 0.0f;
this->lpScale[ch] = 1.0f;
this->lpBias[ch] = 0.0f;
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Initialize. Randomly initializes each neuron value.
@param i_config configuration data
@param i_inputDataStruct input data structure information
@return 0 on success */
ErrorCode BatchNormalizationAll_LayerData_GPU::Initialize(const SettingData::Standard::IData& i_data)
{
ErrorCode err;
//
err = this->SetLayerConfig(i_data);
if(err != ErrorCode::ERROR_CODE_NONE)
return err;
//
err = this->Initialize();
if(err != ErrorCode::ERROR_CODE_NONE)
return err;
//
err = this->ChangeOptimizer(L"SGD");
if(err != ErrorCode::ERROR_CODE_NONE)
return err;
return ErrorCode::ERROR_CODE_NONE;
}
/** Initialize. Reads the data from a buffer.
@param i_lpBuffer start address of the read buffer.
@param i_bufferSize size of the readable buffer.
@return 0 on success */
ErrorCode BatchNormalizationAll_LayerData_GPU::InitializeFromBuffer(const BYTE* i_lpBuffer, U64 i_bufferSize, S64& o_useBufferSize )
{
S64 readBufferByte = 0;
//
S64 useBufferByte = 0;
SettingData::Standard::IData* pLayerStructure = CreateLayerStructureSettingFromBuffer(&i_lpBuffer[readBufferByte], i_bufferSize, useBufferByte);
if(pLayerStructure == NULL)
return ErrorCode::ERROR_CODE_INITLAYER_READ_CONFIG;
readBufferByte += useBufferByte;
this->SetLayerConfig(*pLayerStructure);
delete pLayerStructure;
//
this->Initialize();
//
hipMemcpy(thrust::raw_pointer_cast(&this->lpMean[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpMean.size(), hipMemcpyHostToDevice);
readBufferByte += sizeof(F32)*(U32)this->lpMean.size();
//
hipMemcpy(thrust::raw_pointer_cast(&this->lpVariance[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpVariance.size(), hipMemcpyHostToDevice);
readBufferByte += sizeof(F32)*(U32)this->lpVariance.size();
//
hipMemcpy(thrust::raw_pointer_cast(&this->lpScale[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpScale.size(), hipMemcpyHostToDevice);
readBufferByte += sizeof(F32)*(U32)this->lpScale.size();
//
hipMemcpy(thrust::raw_pointer_cast(&this->lpBias[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpBias.size(), hipMemcpyHostToDevice);
readBufferByte += sizeof(F32)*(U32)this->lpBias.size();
//
S64 useBufferSize = 0;
// bias
if(this->m_pOptimizer_bias)
delete this->m_pOptimizer_bias;
this->m_pOptimizer_bias = CreateOptimizerFromBuffer_GPU(&i_lpBuffer[readBufferByte], i_bufferSize-readBufferByte, useBufferSize);
readBufferByte += useBufferSize;
// neuron
if(this->m_pOptimizer_scale)
delete this->m_pOptimizer_scale;
this->m_pOptimizer_scale = CreateOptimizerFromBuffer_GPU(&i_lpBuffer[readBufferByte], i_bufferSize-readBufferByte, useBufferSize);
readBufferByte += useBufferSize;
o_useBufferSize = readBufferByte;
return ErrorCode::ERROR_CODE_NONE;
}
//===========================
// Layer save
//===========================
/** Writes the layer to a buffer.
@param o_lpBuffer start address of the destination buffer. Must be able to hold the number of bytes returned by GetUseBufferByteCount.
@return the number of bytes written on success, a negative value on failure */
S64 BatchNormalizationAll_LayerData_GPU::WriteToBuffer(BYTE* o_lpBuffer)const
{
if(this->pLayerStructure == NULL)
return ErrorCode::ERROR_CODE_NONREGIST_CONFIG;
S64 writeBufferByte = 0;
//
writeBufferByte += this->pLayerStructure->WriteToBuffer(&o_lpBuffer[writeBufferByte]);
//
hipMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpMean[0]), sizeof(F32)*this->lpMean.size(), hipMemcpyDeviceToHost);
writeBufferByte += sizeof(F32)*(U32)this->lpMean.size();
//
hipMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpVariance[0]), sizeof(F32)*this->lpVariance.size(), hipMemcpyDeviceToHost);
writeBufferByte += sizeof(F32)*(U32)this->lpVariance.size();
//
hipMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpScale[0]), sizeof(F32)*this->lpScale.size(), hipMemcpyDeviceToHost);
writeBufferByte += sizeof(F32)*(U32)this->lpScale.size();
//
hipMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpBias[0]), sizeof(F32)*this->lpBias.size(), hipMemcpyDeviceToHost);
writeBufferByte += sizeof(F32)*(U32)this->lpBias.size();
//
// bias
writeBufferByte += this->m_pOptimizer_bias->WriteToBuffer(&o_lpBuffer[writeBufferByte]);
// neuron
writeBufferByte += this->m_pOptimizer_scale->WriteToBuffer(&o_lpBuffer[writeBufferByte]);
return writeBufferByte;
}
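/* Serialized buffer layout used by InitializeFromBuffer/WriteToBuffer above
 * (summary for illustration): [layer structure settings][mean][variance][scale]
 * [bias][bias optimizer][scale optimizer], with each parameter block stored as
 * F32 values copied directly between the buffer and device memory. */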
//===========================
// Layer creation
//===========================
/** Creates a layer.
@param guid GUID of the layer to create. */
ILayerBase* BatchNormalizationAll_LayerData_GPU::CreateLayer(const Gravisbell::GUID& guid, const IODataStruct i_lpInputDataStruct[], U32 i_inputLayerCount, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
{
if(this->CheckCanUseInputDataStruct(i_lpInputDataStruct, i_inputLayerCount) == false)
return NULL;
return new CNNSingle2SingleLayerBase_GPU<BatchNormalizationAll_GPU, BatchNormalizationAll_LayerData_GPU>(guid, *this, i_lpInputDataStruct[0], i_temporaryMemoryManager);
}
//===========================
// Optimizer settings
//===========================
/** Changes the optimizer */
ErrorCode BatchNormalizationAll_LayerData_GPU::ChangeOptimizer(const wchar_t i_optimizerID[])
{
ChangeOptimizer_GPU(&this->m_pOptimizer_bias, i_optimizerID, (U32)this->lpBias.size());
ChangeOptimizer_GPU(&this->m_pOptimizer_scale, i_optimizerID, (U32)this->lpScale.size());
return ErrorCode::ERROR_CODE_NONE;
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
/** Create a layer for GPU processing.
* @param GUID of layer to create.
*/
EXPORT_API Gravisbell::Layer::ILayerData* CreateLayerDataGPU(const Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager, Gravisbell::GUID guid, const Gravisbell::SettingData::Standard::IData& i_data)
{
//
Gravisbell::Layer::NeuralNetwork::BatchNormalizationAll_LayerData_GPU* pLayerData = new Gravisbell::Layer::NeuralNetwork::BatchNormalizationAll_LayerData_GPU(guid);
if(pLayerData == NULL)
return NULL;
//
Gravisbell::ErrorCode errCode = pLayerData->Initialize(i_data);
if(errCode != Gravisbell::ErrorCode::ERROR_CODE_NONE)
{
delete pLayerData;
return NULL;
}
return pLayerData;
}
EXPORT_API Gravisbell::Layer::ILayerData* CreateLayerDataGPUfromBuffer(const Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager, Gravisbell::GUID guid, const BYTE* i_lpBuffer, S64 i_bufferSize, S64& o_useBufferSize)
{
//
Gravisbell::Layer::NeuralNetwork::BatchNormalizationAll_LayerData_GPU* pLayerData = new Gravisbell::Layer::NeuralNetwork::BatchNormalizationAll_LayerData_GPU(guid);
if(pLayerData == NULL)
return NULL;
//
S64 useBufferSize = 0;
Gravisbell::ErrorCode errCode = pLayerData->InitializeFromBuffer(i_lpBuffer, i_bufferSize, useBufferSize);
if(errCode != Gravisbell::ErrorCode::ERROR_CODE_NONE)
{
delete pLayerData;
return NULL;
}
//
o_useBufferSize = useBufferSize;
return pLayerData;
}
|
ed609ff27d0ab803ce390a4e324defc91f4b129e.cu
|
//======================================
// Batch normalization layer data
// GPU control
//======================================
#include"stdafx.h"
#include"BatchNormalizationAll_LayerData_GPU.cuh"
#include"BatchNormalizationAll_FUNC.hpp"
#include"BatchNormalizationAll_GPU.cuh"
#include"Library/NeuralNetwork/Optimizer.h"
#include"../_LayerBase/CLayerBase_GPU.cuh"
using namespace Gravisbell;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
//===========================
// Constructor / Destructor
//===========================
/** Constructor */
BatchNormalizationAll_LayerData_GPU::BatchNormalizationAll_LayerData_GPU(const Gravisbell::GUID& guid)
: BatchNormalizationAll_LayerData_Base(guid)
{
}
/** Destructor */
BatchNormalizationAll_LayerData_GPU::~BatchNormalizationAll_LayerData_GPU()
{
}
//===========================
// Initialization
//===========================
/** Initialize. Randomly initializes each neuron value.
@return 0 on success */
ErrorCode BatchNormalizationAll_LayerData_GPU::Initialize(void)
{
this->lpMean.resize(1);
this->lpVariance.resize(1);
this->lpScale.resize(1);
this->lpBias.resize(1);
for(U32 ch=0; ch<1; ch++)
{
this->lpMean[ch] = 0.0f;
this->lpVariance[ch] = 0.0f;
this->lpScale[ch] = 1.0f;
this->lpBias[ch] = 0.0f;
}
return ErrorCode::ERROR_CODE_NONE;
}
/** Initialize. Randomly initializes each neuron value.
@param i_config configuration data
@param i_inputDataStruct input data structure information
@return 0 on success */
ErrorCode BatchNormalizationAll_LayerData_GPU::Initialize(const SettingData::Standard::IData& i_data)
{
ErrorCode err;
// register the configuration data
err = this->SetLayerConfig(i_data);
if(err != ErrorCode::ERROR_CODE_NONE)
return err;
// initialize
err = this->Initialize();
if(err != ErrorCode::ERROR_CODE_NONE)
return err;
// set the optimizer
err = this->ChangeOptimizer(L"SGD");
if(err != ErrorCode::ERROR_CODE_NONE)
return err;
return ErrorCode::ERROR_CODE_NONE;
}
/** Initialize. Reads the data from a buffer.
@param i_lpBuffer start address of the read buffer.
@param i_bufferSize size of the readable buffer.
@return 0 on success */
ErrorCode BatchNormalizationAll_LayerData_GPU::InitializeFromBuffer(const BYTE* i_lpBuffer, U64 i_bufferSize, S64& o_useBufferSize )
{
S64 readBufferByte = 0;
// configuration data
S64 useBufferByte = 0;
SettingData::Standard::IData* pLayerStructure = CreateLayerStructureSettingFromBuffer(&i_lpBuffer[readBufferByte], i_bufferSize, useBufferByte);
if(pLayerStructure == NULL)
return ErrorCode::ERROR_CODE_INITLAYER_READ_CONFIG;
readBufferByte += useBufferByte;
this->SetLayerConfig(*pLayerStructure);
delete pLayerStructure;
// initialize
this->Initialize();
// mean
cudaMemcpy(thrust::raw_pointer_cast(&this->lpMean[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpMean.size(), cudaMemcpyHostToDevice);
readBufferByte += sizeof(F32)*(U32)this->lpMean.size();
// variance
cudaMemcpy(thrust::raw_pointer_cast(&this->lpVariance[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpVariance.size(), cudaMemcpyHostToDevice);
readBufferByte += sizeof(F32)*(U32)this->lpVariance.size();
// scale values
cudaMemcpy(thrust::raw_pointer_cast(&this->lpScale[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpScale.size(), cudaMemcpyHostToDevice);
readBufferByte += sizeof(F32)*(U32)this->lpScale.size();
// bias values
cudaMemcpy(thrust::raw_pointer_cast(&this->lpBias[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpBias.size(), cudaMemcpyHostToDevice);
readBufferByte += sizeof(F32)*(U32)this->lpBias.size();
// optimizers
S64 useBufferSize = 0;
// bias
if(this->m_pOptimizer_bias)
delete this->m_pOptimizer_bias;
this->m_pOptimizer_bias = CreateOptimizerFromBuffer_GPU(&i_lpBuffer[readBufferByte], i_bufferSize-readBufferByte, useBufferSize);
readBufferByte += useBufferSize;
// neuron
if(this->m_pOptimizer_scale)
delete this->m_pOptimizer_scale;
this->m_pOptimizer_scale = CreateOptimizerFromBuffer_GPU(&i_lpBuffer[readBufferByte], i_bufferSize-readBufferByte, useBufferSize);
readBufferByte += useBufferSize;
o_useBufferSize = readBufferByte;
return ErrorCode::ERROR_CODE_NONE;
}
//===========================
// Layer save
//===========================
/** Writes the layer to a buffer.
@param o_lpBuffer start address of the destination buffer. Must be able to hold the number of bytes returned by GetUseBufferByteCount.
@return the number of bytes written on success, a negative value on failure */
S64 BatchNormalizationAll_LayerData_GPU::WriteToBuffer(BYTE* o_lpBuffer)const
{
if(this->pLayerStructure == NULL)
return ErrorCode::ERROR_CODE_NONREGIST_CONFIG;
S64 writeBufferByte = 0;
// configuration data
writeBufferByte += this->pLayerStructure->WriteToBuffer(&o_lpBuffer[writeBufferByte]);
// mean
cudaMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpMean[0]), sizeof(F32)*this->lpMean.size(), cudaMemcpyDeviceToHost);
writeBufferByte += sizeof(F32)*(U32)this->lpMean.size();
// variance
cudaMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpVariance[0]), sizeof(F32)*this->lpVariance.size(), cudaMemcpyDeviceToHost);
writeBufferByte += sizeof(F32)*(U32)this->lpVariance.size();
// scale values
cudaMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpScale[0]), sizeof(F32)*this->lpScale.size(), cudaMemcpyDeviceToHost);
writeBufferByte += sizeof(F32)*(U32)this->lpScale.size();
// bias values
cudaMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpBias[0]), sizeof(F32)*this->lpBias.size(), cudaMemcpyDeviceToHost);
writeBufferByte += sizeof(F32)*(U32)this->lpBias.size();
// optimizers
// bias
writeBufferByte += this->m_pOptimizer_bias->WriteToBuffer(&o_lpBuffer[writeBufferByte]);
// neuron
writeBufferByte += this->m_pOptimizer_scale->WriteToBuffer(&o_lpBuffer[writeBufferByte]);
return writeBufferByte;
}
//===========================
// Layer creation
//===========================
/** Creates a layer.
@param guid GUID of the layer to create. */
ILayerBase* BatchNormalizationAll_LayerData_GPU::CreateLayer(const Gravisbell::GUID& guid, const IODataStruct i_lpInputDataStruct[], U32 i_inputLayerCount, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
{
if(this->CheckCanUseInputDataStruct(i_lpInputDataStruct, i_inputLayerCount) == false)
return NULL;
return new CNNSingle2SingleLayerBase_GPU<BatchNormalizationAll_GPU, BatchNormalizationAll_LayerData_GPU>(guid, *this, i_lpInputDataStruct[0], i_temporaryMemoryManager);
}
//===========================
// Optimizer settings
//===========================
/** Changes the optimizer */
ErrorCode BatchNormalizationAll_LayerData_GPU::ChangeOptimizer(const wchar_t i_optimizerID[])
{
ChangeOptimizer_GPU(&this->m_pOptimizer_bias, i_optimizerID, (U32)this->lpBias.size());
ChangeOptimizer_GPU(&this->m_pOptimizer_scale, i_optimizerID, (U32)this->lpScale.size());
return ErrorCode::ERROR_CODE_NONE;
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
/** Create a layer for GPU processing.
* @param GUID of layer to create.
*/
EXPORT_API Gravisbell::Layer::ILayerData* CreateLayerDataGPU(const Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager, Gravisbell::GUID guid, const Gravisbell::SettingData::Standard::IData& i_data)
{
// create
Gravisbell::Layer::NeuralNetwork::BatchNormalizationAll_LayerData_GPU* pLayerData = new Gravisbell::Layer::NeuralNetwork::BatchNormalizationAll_LayerData_GPU(guid);
if(pLayerData == NULL)
return NULL;
// initialize
Gravisbell::ErrorCode errCode = pLayerData->Initialize(i_data);
if(errCode != Gravisbell::ErrorCode::ERROR_CODE_NONE)
{
delete pLayerData;
return NULL;
}
return pLayerData;
}
EXPORT_API Gravisbell::Layer::ILayerData* CreateLayerDataGPUfromBuffer(const Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager, Gravisbell::GUID guid, const BYTE* i_lpBuffer, S64 i_bufferSize, S64& o_useBufferSize)
{
// create
Gravisbell::Layer::NeuralNetwork::BatchNormalizationAll_LayerData_GPU* pLayerData = new Gravisbell::Layer::NeuralNetwork::BatchNormalizationAll_LayerData_GPU(guid);
if(pLayerData == NULL)
return NULL;
// initialize
S64 useBufferSize = 0;
Gravisbell::ErrorCode errCode = pLayerData->InitializeFromBuffer(i_lpBuffer, i_bufferSize, useBufferSize);
if(errCode != Gravisbell::ErrorCode::ERROR_CODE_NONE)
{
delete pLayerData;
return NULL;
}
// store the amount of buffer used
o_useBufferSize = useBufferSize;
return pLayerData;
}
|
9c5894cf430adcf1f71692024b1cdfe6ad2a7591.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cassert>
#include "conv_kernel.h"
/**
* Will perform a convolution on the input image
* Original code from here:
* http://www.songho.ca/dsp/convolution/convolution.html#cpp_conv2d
*/
__global__
void perform_convolution(double *kernel, int kRows, int kCols,
double *in, double *out, int rows, int cols) {
// For now a hack
assert(kRows == 10);
assert(kCols == 10);
// Copy to shared memory
__shared__ double skernel[10*10];
if (threadIdx.x < 10 && threadIdx.y < 10) {
skernel[threadIdx.x*kRows+threadIdx.y] = kernel[threadIdx.x*kRows+threadIdx.y];
}
// Sync so we know we have copied everything
__syncthreads();
// find center position of kernel (half of kernel size)
int kCenterX = kCols / 2;
int kCenterY = kRows / 2;
// Corrected locations to start filter on
int mm,nn;
int ii,jj;
// Calculate our given location from our block/thread id
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// Return if we are out of bounds
if(i >= rows || j >= cols)
return;
// Temp variable to store the sum in
int tempout = i*rows + j;
double temp = 0.0;
// kernel rows
for(int m=0; m < kRows; ++m)
{
// row index of flipped kernel
mm = kRows - 1 - m;
// kernel columns
for(int n=0; n < kCols; ++n)
{
// column index of flipped kernel
nn = kCols - 1 - n;
// index of input signal, used for checking boundary
ii = i + (m - kCenterY);
jj = j + (n - kCenterX);
// ignore input samples which are out of bound
if( ii >= 0 && ii < rows && jj >= 0 && jj < cols ) {
// calculate 2d => 1d mapping
int tempin = ii*rows + jj;
int tempkerneel = mm*kRows + nn;
// multiply it by our kernel value
temp += in[tempin] * skernel[tempkerneel];
}
}
}
// Finally write to memory location
out[tempout] = temp;
}
|
9c5894cf430adcf1f71692024b1cdfe6ad2a7591.cu
|
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cassert>
#include "conv_kernel.h"
/**
* Will perform a convolution on the input image
* Original code from here:
* http://www.songho.ca/dsp/convolution/convolution.html#cpp_conv2d
*/
__global__
void perform_convolution(double *kernel, int kRows, int kCols,
double *in, double *out, int rows, int cols) {
// For now a hack
assert(kRows == 10);
assert(kCols == 10);
// Copy to shared memory
__shared__ double skernel[10*10];
if (threadIdx.x < 10 && threadIdx.y < 10) {
skernel[threadIdx.x*kRows+threadIdx.y] = kernel[threadIdx.x*kRows+threadIdx.y];
}
// Sync so we know we have copied everything
__syncthreads();
// find center position of kernel (half of kernel size)
int kCenterX = kCols / 2;
int kCenterY = kRows / 2;
// Corrected locations to start filter on
int mm,nn;
int ii,jj;
// Calculate our given location from our block/thread id
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// Return if we are out of bounds
if(i >= rows || j >= cols)
return;
// Temp variable to store the sum in
int tempout = i*rows + j;
double temp = 0.0;
// kernel rows
for(int m=0; m < kRows; ++m)
{
// row index of flipped kernel
mm = kRows - 1 - m;
// kernel columns
for(int n=0; n < kCols; ++n)
{
// column index of flipped kernel
nn = kCols - 1 - n;
// index of input signal, used for checking boundary
ii = i + (m - kCenterY);
jj = j + (n - kCenterX);
// ignore input samples which are out of bound
if( ii >= 0 && ii < rows && jj >= 0 && jj < cols ) {
// calculate 2d => 1d mapping
int tempin = ii*rows + jj;
int tempkerneel = mm*kRows + nn;
// multiply it by our kernel value
temp += in[tempin] * skernel[tempkerneel];
}
}
}
// Finally write to memory location
out[tempout] = temp;
}
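// Minimal host-side launch sketch (illustration only; the 32x32 block shape, the
// buffer staging and the absence of error handling are assumptions, not part of
// the original code). The kernel above requires a 10x10 convolution kernel (see the asserts).
void example_convolve(const double *h_kernel, const double *h_in, double *h_out,
                      int rows, int cols) {
    double *d_kernel, *d_in, *d_out;
    cudaMalloc(&d_kernel, 10 * 10 * sizeof(double));
    cudaMalloc(&d_in,  rows * cols * sizeof(double));
    cudaMalloc(&d_out, rows * cols * sizeof(double));
    cudaMemcpy(d_kernel, h_kernel, 10 * 10 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_in, h_in, rows * cols * sizeof(double), cudaMemcpyHostToDevice);
    dim3 block(32, 32);  // at least 10x10 so the shared-memory copy of the kernel completes
    dim3 grid((rows + block.x - 1) / block.x, (cols + block.y - 1) / block.y);
    perform_convolution<<<grid, block>>>(d_kernel, 10, 10, d_in, d_out, rows, cols);
    cudaMemcpy(h_out, d_out, rows * cols * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_kernel); cudaFree(d_in); cudaFree(d_out);
}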
|
a7a6bb321ad9b9c1ad38cb6d6f798b73d503946a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
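/* Note: up to truncating the sum to n in [-10,10), thetta matches the Jacobi theta
series theta(z|tau) = sum_n exp(i*pi*tau*n^2) * exp(2*i*n*z), i.e. theta_3 evaluated
at nome q = exp(i*pi*tau). */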
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale = 3;
float fx = scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
hipComplex gren(2.0,0.0);
hipComplex next=flurn;
hipComplex current = cue;
hipComplex xnext = flurn;
hipComplex xcurrent = cue;
hipComplex tinny(.0001,0.0001);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
int uu;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// One way of describing this would be we want to perform Newton's method
//on the Mandelbrot set
/* preiterate */
for(v=0;v<20;v++)
{
cue = aon*the3(q/aon,expc(vro*ip/cue))/the3(q/cue,expc(vro*ip*cue));
cue = uon*the3(q/aon,expc(vro*ip/(cue*cue)))/the3(q/cue,expc(vro*ip*(cue*cue)));
}
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
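/* Illustrative host-side use of kernelLauncher (a sketch, not part of the original
file; h_out is a placeholder host buffer). It assumes w and h are multiples of
TX/TY, since distanceKernel writes d_out[i] without a bounds check.
uchar4 *d_out;
hipMalloc(&d_out, w * h * sizeof(uchar4)); // device framebuffer
kernelLauncher(d_out, w, h, make_int2(w / 2, h / 2)); // render one frame
hipMemcpy(h_out, d_out, w * h * sizeof(uchar4), hipMemcpyDeviceToHost);
hipFree(d_out);
*/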
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
|
a7a6bb321ad9b9c1ad38cb6d6f798b73d503946a.cu
|
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale = 3;
float fx = scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
cuComplex gren(2.0,0.0);
cuComplex next=flurn;
cuComplex current = cue;
cuComplex xnext = flurn;
cuComplex xcurrent = cue;
cuComplex tinny(.0001,0.0001);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
int uu;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// One way of describing this would be we want to perform Newton's method
//on the Mandelbrot set
/* preiterate */
for(v=0;v<20;v++)
{
cue = aon*the3(q/aon,expc(vro*ip/cue))/the3(q/cue,expc(vro*ip*cue));
cue = uon*the3(q/aon,expc(vro*ip/(cue*cue)))/the3(q/cue,expc(vro*ip*(cue*cue)));
}
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
|
26ea8317dc44715fda1822f5b914bee32cf8cef1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <atomic>
#include <mutex>
#include <thread>
#include <sys/types.h>
#include "ServerFunction.cu"
int n_stream=16;
int chunk_size = 512;
int threads_num = 512;
int total_task_num = 0;
int cpu_threads_num = 8;
int chunk_num = 3;
int round_num = 0;
int taskNum = 1;
int recalcu_region_size = 2;
std::atomic<int> handled_task_num{0};
std::queue<Task> cputaskq;
std::map<std::string, Task> taskMap;
std::map<std::string, buildTask> buildTaskMap;
hipStream_t *stream;
int time_use;
struct timeval start;
struct timeval end;
#define USAGE \
"usage:\n" \
" server [options]\n" \
"options:\n" \
" -s [chunk_size] Chunk size of file\n" \
" -n [chunk_num_each_thread] Chunk num each thread calculate\n" \
" -t [threads_num] Threads num for rsync file\n" \
" -c [cpu_threads_num] CPU threads num for the revise stage\n" \
" -r [recalcu_region_size] Recalculation region size\n" \
" -o [total_task_num] Total task num\n" \
" -h Show this help message\n"
/* OPTIONS DESCRIPTOR ====================================================== */
static struct option gLongOptions[] = {
{"chunk_size", required_argument, NULL, 's'},
{"chunk_num_each_thread", required_argument, NULL, 'n'},
{"threads_num", required_argument, NULL, 't'},
{"cpu_threads_num", required_argument, NULL, 'c'},
{"recalcu_region_size", required_argument, NULL, 'r'},
{"total_task_num", required_argument, NULL, 'o'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
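// Example invocation (illustrative; the values are placeholders):
// ./server -s 1024 -n 4 -t 256 -c 8 -r 2 -o 16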
void *CPURevise(void *arg){
while(1){
if(!cputaskq.empty()){
Task tt = cputaskq.front();
cputaskq.pop();
hipFree(tt.dFileContent);
hipFree(tt.dMatchChunkid);
hipFree(tt.dMatchOffset);
hipFree(tt.dHt);
hipFree(tt.dStat);
for(int i=0;i<tt.totalThreads-1;++i){
int t_match_num = tt.stat[i];
int j = i+1;
// With the testing area removed, beginPos is necessarily chunk_size*chunk_num*(i+1)
// This case is straightforward: the previous thread has a match at its end and the next thread also has a match,
// and the position after the jump goes past the next thread's first match position
if(t_match_num > 0 && tt.stat[j] > 0 && tt.matchOffset[chunk_num*i+t_match_num-1]+chunk_size > tt.matchOffset[chunk_num*j]){
int jump_pos = tt.matchOffset[chunk_num*i+t_match_num-1]+chunk_size;
recalcu(chunk_size, chunk_num, tt.stat, jump_pos, tt.fileLen, tt.totalThreads,
tt.file, tt.matchOffset, tt.matchChunkid, tt.ht, j, 2);
}
}
free(tt.ht);
buildTask bt = {tt.fileName, tt.chunkSize, tt.totalThreads, tt.fd, tt.file, tt.stat, tt.matchChunkid, tt.matchOffset, tt.matchIdVec};
buildTaskMap[bt.fileName] = bt;
/*taskNum++;
//std::cout << taskNum << std::endl;
if(taskNum == 17){
gettimeofday(&end,NULL);
time_use=(end.tv_sec-start.tv_sec)*1000000+(end.tv_usec-start.tv_usec);//us nearly 40000us
printf("16 rsync files total computation time_use is %d us\n", (int)time_use);
}*/
}
else{
std::this_thread::yield();
}
}
}
void RunServer() {
std::string server_address("0.0.0.0:50051");
RsyncServiceImpl service;
ServerBuilder builder;
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
builder.RegisterService(&service);
std::unique_ptr<Server> server(builder.BuildAndStart());
std::cout << "Server listening on " << server_address << std::endl;
server->Wait();
}
int main(int argc, char **argv){
int option_char = 0;
while ((option_char = getopt_long(argc, argv, "o:s:n:t:c:r:h", gLongOptions, NULL)) != -1) {
switch (option_char) {
default:
fprintf(stderr, "%s", USAGE);
exit(__LINE__);
case 's': // chunk size
chunk_size = atoi(optarg);
break;
case 'n': // chunk num per thread
chunk_num = atoi(optarg);
break;
case 't': // threads num for rsync
threads_num = atoi(optarg);
break;
case 'o':
total_task_num = atoi(optarg);
break;
case 'c': // cpu threads num
cpu_threads_num = atoi(optarg);
break;
case 'r': // recalcu_region_size
recalcu_region_size = atoi(optarg);
break;
case 'h': // help
fprintf(stdout, "%s", USAGE);
exit(0);
break;
}
}
stream = GPUWarmUp(n_stream);
// CPU thread
/*pthread_t tidp;
int ret;
ret = pthread_create(&tidp, NULL, CPURevise, NULL); // create the thread
if (ret){
printf("pthread_create failed:%d\n", ret);
return -1;
}*/
RunServer();
for(int i=0; i<n_stream; i++){
hipStreamDestroy(stream[i]);
}
}
|
26ea8317dc44715fda1822f5b914bee32cf8cef1.cu
|
#include <stdint.h>
#include <atomic>
#include <mutex>
#include <thread>
#include <sys/types.h>
#include "ServerFunction.cu"
int n_stream=16;
int chunk_size = 512;
int threads_num = 512;
int total_task_num = 0;
int cpu_threads_num = 8;
int chunk_num = 3;
int round_num = 0;
int taskNum = 1;
int recalcu_region_size = 2;
std::atomic<int> handled_task_num{0};
std::queue<Task> cputaskq;
std::map<std::string, Task> taskMap;
std::map<std::string, buildTask> buildTaskMap;
cudaStream_t *stream;
int time_use;
struct timeval start;
struct timeval end;
#define USAGE \
"usage:\n" \
" server [options]\n" \
"options:\n" \
" -s [chunk_size] Chunk size of file\n" \
" -n [chunk_num_each_thread] Chunk num each thread calculate\n" \
" -t [threads_num] Threads num for rsync file\n" \
" -c [cpu_threads_num] CPU threads num for the revise stage\n" \
" -r [recalcu_region_size] Recalculation region size\n" \
" -o [total_task_num] Total task num\n" \
" -h Show this help message\n"
/* OPTIONS DESCRIPTOR ====================================================== */
static struct option gLongOptions[] = {
{"chunk_size", required_argument, NULL, 's'},
{"chunk_num_each_thread", required_argument, NULL, 'n'},
{"threads_num", required_argument, NULL, 't'},
{"cpu_threads_num", required_argument, NULL, 'c'},
{"recalcu_region_size", required_argument, NULL, 'r'},
{"total_task_num", required_argument, NULL, 'o'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
void *CPURevise(void *arg){
while(1){
if(!cputaskq.empty()){
Task tt = cputaskq.front();
cputaskq.pop();
cudaFree(tt.dFileContent);
cudaFree(tt.dMatchChunkid);
cudaFree(tt.dMatchOffset);
cudaFree(tt.dHt);
cudaFree(tt.dStat);
for(int i=0;i<tt.totalThreads-1;++i){
int t_match_num = tt.stat[i];
int j = i+1;
// With the testing area removed, beginPos is necessarily chunk_size*chunk_num*(i+1)
// This case is straightforward: the previous thread has a match at its end and the next thread also has a match,
// and the position after the jump goes past the next thread's first match position
if(t_match_num > 0 && tt.stat[j] > 0 && tt.matchOffset[chunk_num*i+t_match_num-1]+chunk_size > tt.matchOffset[chunk_num*j]){
int jump_pos = tt.matchOffset[chunk_num*i+t_match_num-1]+chunk_size;
recalcu(chunk_size, chunk_num, tt.stat, jump_pos, tt.fileLen, tt.totalThreads,
tt.file, tt.matchOffset, tt.matchChunkid, tt.ht, j, 2);
}
}
free(tt.ht);
buildTask bt = {tt.fileName, tt.chunkSize, tt.totalThreads, tt.fd, tt.file, tt.stat, tt.matchChunkid, tt.matchOffset, tt.matchIdVec};
buildTaskMap[bt.fileName] = bt;
/*taskNum++;
//std::cout << taskNum << std::endl;
if(taskNum == 17){
gettimeofday(&end,NULL);
time_use=(end.tv_sec-start.tv_sec)*1000000+(end.tv_usec-start.tv_usec);//us nearly 40000us
printf("16 rsync files total computation time_use is %d us\n", (int)time_use);
}*/
}
else{
std::this_thread::yield();
}
}
}
void RunServer() {
std::string server_address("0.0.0.0:50051");
RsyncServiceImpl service;
ServerBuilder builder;
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
builder.RegisterService(&service);
std::unique_ptr<Server> server(builder.BuildAndStart());
std::cout << "Server listening on " << server_address << std::endl;
server->Wait();
}
int main(int argc, char **argv){
int option_char = 0;
while ((option_char = getopt_long(argc, argv, "o:s:n:t:c:r:h", gLongOptions, NULL)) != -1) {
switch (option_char) {
default:
fprintf(stderr, "%s", USAGE);
exit(__LINE__);
case 's': // chunk size
chunk_size = atoi(optarg);
break;
case 'n': // chunk num per thread
chunk_num = atoi(optarg);
break;
case 't': // threads num for rsync
threads_num = atoi(optarg);
break;
case 'o':
total_task_num = atoi(optarg);
break;
case 'c': // cpu threads num
cpu_threads_num = atoi(optarg);
break;
case 'r': // recalcu_region_size
recalcu_region_size = atoi(optarg);
break;
case 'h': // help
fprintf(stdout, "%s", USAGE);
exit(0);
break;
}
}
stream = GPUWarmUp(n_stream);
// CPU thread
/*pthread_t tidp;
int ret;
ret = pthread_create(&tidp, NULL, CPURevise, NULL); // create the thread
if (ret){
printf("pthread_create failed:%d\n", ret);
return -1;
}*/
RunServer();
for(int i=0; i<n_stream; i++){
cudaStreamDestroy(stream[i]);
}
}
|
47652235f8d51e7c64c77bab29d3c9d715af6831.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
__global__ void linearRegressionReducerKernel(const int * const keys,
const float * const vals,
const int * numVals,
int * const keySpace,
float * const valSpace)
{
__shared__ volatile int nv;
__shared__ volatile float numElems;
float fsum = 0.0f;
if (threadIdx.x == 0) nv = numVals[0];
for (int i = 0; i < nv; ++i)
{
fsum += vals[threadIdx.x * nv + i];
}
if (threadIdx.x == blockDim.x - 1) numElems = fsum;
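// Note: numElems is written only by the last thread and then read by every thread
// without a __syncthreads(); this relies on the whole block (6 threads in the
// launch below) executing as a single warp.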
keySpace[threadIdx.x] = keys[threadIdx.x];
valSpace[threadIdx.x] = fsum / numElems;
}
void linearRegressionReducerExecute(const int * const keys,
const float * const vals,
const int * numVals,
int * const keySpace,
float * const valSpace,
hipStream_t & stream)
{
hipLaunchKernelGGL(( linearRegressionReducerKernel), dim3(1), dim3(6), 0, stream, keys, vals, numVals, keySpace, valSpace);
#if 0
{
int cpuKeys[6];
float cpuVals[6];
hipMemcpy(cpuKeys, keySpace, sizeof(cpuKeys), hipMemcpyDeviceToHost);
hipMemcpy(cpuVals, valSpace, sizeof(cpuVals), hipMemcpyDeviceToHost);
for (int i = 0; i < 6; ++i)
{
printf("%2d: %2d - %f\n", i, cpuKeys[i], cpuVals[i]);
}
fflush(stdout);
}
#endif
}
|
47652235f8d51e7c64c77bab29d3c9d715af6831.cu
|
#include <cstdio>
__global__ void linearRegressionReducerKernel(const int * const keys,
const float * const vals,
const int * numVals,
int * const keySpace,
float * const valSpace)
{
__shared__ volatile int nv;
__shared__ volatile float numElems;
float fsum = 0.0f;
if (threadIdx.x == 0) nv = numVals[0];
for (int i = 0; i < nv; ++i)
{
fsum += vals[threadIdx.x * nv + i];
}
if (threadIdx.x == blockDim.x - 1) numElems = fsum;
keySpace[threadIdx.x] = keys[threadIdx.x];
valSpace[threadIdx.x] = fsum / numElems;
}
void linearRegressionReducerExecute(const int * const keys,
const float * const vals,
const int * numVals,
int * const keySpace,
float * const valSpace,
cudaStream_t & stream)
{
linearRegressionReducerKernel<<<1, 6, 0, stream>>>(keys, vals, numVals, keySpace, valSpace);
#if 0
{
int cpuKeys[6];
float cpuVals[6];
cudaMemcpy(cpuKeys, keySpace, sizeof(cpuKeys), cudaMemcpyDeviceToHost);
cudaMemcpy(cpuVals, valSpace, sizeof(cpuVals), cudaMemcpyDeviceToHost);
for (int i = 0; i < 6; ++i)
{
printf("%2d: %2d - %f\n", i, cpuKeys[i], cpuVals[i]);
}
fflush(stdout);
}
#endif
}
|
bd4fea13b5e4c711f5757fa968bca20b9571bdfa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include<time.h>
#include<stdlib.h>
using namespace std;
#define M 2
#define N 2
#define K 2
__global__ void matMulKernel(float* A, float* B, float* C)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= M || col >= K) return; // C is M x K
float sum =0;
for(int i=0;i<N;i++)
{
sum += A[row*N + i]*B[i*K + col];
}
C[row*K + col] = sum;
}
timespec diff(timespec start, timespec end)
{
timespec temp;
if ((end.tv_nsec-start.tv_nsec)<0) {
temp.tv_sec = end.tv_sec-start.tv_sec-1;
temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec-start.tv_sec;
temp.tv_nsec = end.tv_nsec-start.tv_nsec;
}
return temp;
}
int main(void)
{
//declare host matrices
float hA[M*N],hB[N*K],hC[M*K];
timespec time1, time2, time3;
//populate host matrices
for(int i=0;i<M;i++)
{
for(int j=0;j<N;j++)
{
hA[i*N+j] = (float)(rand()%10);
// printf("%f ",hA[i*N+j]);
}
// printf("\n");
}
// printf("\n");
for(int i=0;i<N;i++)
{
for(int j=0;j<K;j++)
{
hB[i*K+j] = (float)(rand()%10);
// printf("%f ",hB[i*K+j]);
}
// printf("\n");
}
// printf("\n");
//load dA and dB into device memory
float* dA, *dB, *dC;
hipError_t err;
uint size = M*N*sizeof(float);
err = hipMalloc((void**)&dA, size);
printf("CUDA malloc A: %s\n",hipGetErrorString(err));
err = hipMemcpy(dA,hA,size,hipMemcpyHostToDevice);
printf("Copy A to device: %s\n",hipGetErrorString(err));
size = N*K*sizeof(float);
err = hipMalloc((void**)&dB, size);
printf("CUDA malloc B: %s\n",hipGetErrorString(err));
err = hipMemcpy(dB,hB,size,hipMemcpyHostToDevice);
printf("Copy B to device: %s\n",hipGetErrorString(err));
//create dC in device memory
size = N*K*sizeof(float);
err = hipMalloc((void**)&dC, size);
printf("CUDA malloc C: %s\n",hipGetErrorString(err));
//kernel
dim3 blockSize(16,32);
dim3 gridSize;
gridSize.x = (K+15)/16;
gridSize.y = (M+31)/32;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
hipLaunchKernelGGL(( matMulKernel), dim3(gridSize), dim3(blockSize), 0, 0, dA, dB, dC);
err = hipDeviceSynchronize();
printf("Run kernel: %s\n", hipGetErrorString(err));
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);
// Read C from device memory
err = hipMemcpy(hC, dC, size, hipMemcpyDeviceToHost);
printf("Copy C off of device: %s\n",hipGetErrorString(err));
//Free device memory
hipFree(dA);
hipFree(dB);
hipFree(dC);
time3 = diff(time1,time2);
printf("%ld:%ld\n",time3.tv_sec, time3.tv_nsec);
/* for(int i=0;i<M;i++)
{
for(int j=0;j<K;j++)
{
printf("%f ",hC[i*K+j]);
}
printf("\n");
}*/
}
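/* Illustrative CPU reference check (a sketch, not part of the original program):
for (int i = 0; i < M; i++)
for (int j = 0; j < K; j++)
{
float ref = 0.0f;
for (int t = 0; t < N; t++) ref += hA[i*N + t] * hB[t*K + j];
// compare ref against hC[i*K + j], e.g. require fabsf(ref - hC[i*K + j]) < 1e-5f
}
*/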
|
bd4fea13b5e4c711f5757fa968bca20b9571bdfa.cu
|
#include <stdio.h>
#include<time.h>
#include<stdlib.h>
using namespace std;
#define M 2
#define N 2
#define K 2
__global__ void matMulKernel(float* A, float* B, float* C)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= M || col >= K) return; // C is M x K
float sum =0;
for(int i=0;i<N;i++)
{
sum += A[row*N + i]*B[i*K + col];
}
C[row*K + col] = sum;
}
timespec diff(timespec start, timespec end)
{
timespec temp;
if ((end.tv_nsec-start.tv_nsec)<0) {
temp.tv_sec = end.tv_sec-start.tv_sec-1;
temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec-start.tv_sec;
temp.tv_nsec = end.tv_nsec-start.tv_nsec;
}
return temp;
}
int main(void)
{
//declare host matrices
float hA[M*N],hB[N*K],hC[M*K];
timespec time1, time2, time3;
//populate host matrices
for(int i=0;i<M;i++)
{
for(int j=0;j<N;j++)
{
hA[i*N+j] = (float)(rand()%10);
// printf("%f ",hA[i*N+j]);
}
// printf("\n");
}
// printf("\n");
for(int i=0;i<N;i++)
{
for(int j=0;j<K;j++)
{
hB[i*K+j] = (float)(rand()%10);
// printf("%f ",hB[i*K+j]);
}
// printf("\n");
}
// printf("\n");
//load dA and dB into device memory
float* dA, *dB, *dC;
cudaError_t err;
uint size = M*N*sizeof(float);
err = cudaMalloc((void**)&dA, size);
printf("CUDA malloc A: %s\n",cudaGetErrorString(err));
err = cudaMemcpy(dA,hA,size,cudaMemcpyHostToDevice);
printf("Copy A to device: %s\n",cudaGetErrorString(err));
size = N*K*sizeof(float);
err = cudaMalloc((void**)&dB, size);
printf("CUDA malloc B: %s\n",cudaGetErrorString(err));
err = cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice);
printf("Copy B to device: %s\n",cudaGetErrorString(err));
//create dC in device memory
size = N*K*sizeof(float);
err = cudaMalloc((void**)&dC, size);
printf("CUDA malloc C: %s\n",cudaGetErrorString(err));
//kernel
dim3 blockSize(16,32);
dim3 gridSize;
gridSize.x = (K+15)/16;
gridSize.y = (M+31)/32;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
matMulKernel<<<gridSize, blockSize>>>(dA, dB, dC);
err = cudaThreadSynchronize();
printf("Run kernel: %s\n", cudaGetErrorString(err));
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);
// Read C from device memory
err = cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
printf("Copy C off of device: %s\n",cudaGetErrorString(err));
//Free device memory
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
time3 = diff(time1,time2);
printf("%ld:%ld\n",time3.tv_sec, time3.tv_nsec);
/* for(int i=0;i<M;i++)
{
for(int j=0;j<K;j++)
{
printf("%f ",hC[i*K+j]);
}
printf("\n");
}*/
}
|
51e8bdfb3662c7ca8023c2c01e41a17a1feacee3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateOutput(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oCol < output.getSize(3))
{
Acctype sum = 0.0;
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, input.getSize(1) + padT);
int hend = min(hstart + kH, input.getSize(2) + padH);
int wend = min(wstart + kW, input.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, input.getSize(1));
hend = min(hend, input.getSize(2));
wend = min(wend, input.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
int ti, hi, wi;
for (ti = tstart; ti < tend; ++ti)
{
for (hi = hstart; hi < hend; ++hi)
{
for (wi = wstart; wi < wend; ++wi)
{
Dtype val = input[slice][ti][hi][wi];
sum += val;
}
}
}
output[slice][oFrame][oRow][oCol] = ScalarConvert<Acctype, Dtype>::to(sum / divide_factor);
}
}
// Inner-most loop size (kW) passed as template parameter for
// performance reasons.
//
template<int KERNEL_WIDTH, typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateOutput_fixedKW(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int kT, int kH,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oCol < output.getSize(3))
{
Acctype sum = 0.0;
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, input.getSize(1) + padT);
int hend = min(hstart + kH, input.getSize(2) + padH);
int wend = min(wstart + KERNEL_WIDTH, input.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, input.getSize(1));
hend = min(hend, input.getSize(2));
wend = min(wend, input.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
int ti, hi, wi;
for (ti = tstart; ti < tend; ++ti)
{
for (hi = hstart; hi < hend; ++hi)
{
for (wi = wstart; wi < wend; ++wi)
{
Dtype val = input[slice][ti][hi][wi];
sum += val;
}
}
}
output[slice][oFrame][oRow][oCol] = ScalarConvert<Acctype, Dtype>::to(sum / divide_factor);
}
}
#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateOutput_fixedKW<KW, scalar_t, accreal>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
cudaInput, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); \
break
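/* Typical use of the macro above (a sketch): dispatch on the runtime kernel width
so the innermost loop bound becomes a compile-time constant the compiler can unroll:
switch (kW) {
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
default: break; // fall back to the generic cuda_VolumetricAveragePooling_updateOutput launch
}
*/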
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput_Stride1(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
Acctype normFactor, int offsetZ)
{
int iCol = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.getSize(1); // input frame/time
int slice = (blockIdx.z + offsetZ) / gradInput.getSize(1); // input slice/feature
// guard against over-tiled threads
if (iRow < gradInput.getSize(2) && iCol < gradInput.getSize(3))
{
Acctype sum = 0.0;
Dtype *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)]
[max(0, iRow - kH + 1)][max(0, iCol - kW + 1)];
int frameOffset = 0;
for (int oFrame = max(0, iFrame - kT + 1);
oFrame < min(iFrame + 1, gradOutput.getSize(1));
++oFrame)
{
int rowOffset = frameOffset;
for (int oRow = max(0, iRow - kH + 1);
oRow < min(iRow + 1, gradOutput.getSize(2));
++oRow)
{
int colOffset = rowOffset;
for (int oCol = max(0, iCol - kW + 1);
oCol < min(iCol + 1, gradOutput.getSize(3));
++oCol)
{
sum += gOut[colOffset];
++colOffset;
}
rowOffset += gradOutput.getSize(3);
}
frameOffset += gradOutput.getSize(2) * gradOutput.getSize(3);
}
gradInput[slice][iFrame][iRow][iCol] = ScalarConvert<Acctype, Dtype>::to(sum * normFactor);
}
}
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput_atomicAdd(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3))
{
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, gradInput.getSize(1) + padT);
int hend = min(hstart + kH, gradInput.getSize(2) + padH);
int wend = min(wstart + kW, gradInput.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, gradInput.getSize(1));
hend = min(hend, gradInput.getSize(2));
wend = min(wend, gradInput.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
Dtype val = ScalarConvert<Acctype, Dtype>::to(
ScalarConvert<Dtype, Acctype>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
for (int iFrame = tstart; iFrame < tend; ++iFrame)
{
for (int iRow = hstart; iRow < hend; ++iRow)
{
for (int iCol = wstart; iCol < wend; ++iCol)
{
atomicAdd(&gradInput[slice][iFrame][iRow][iCol], val);
}
}
}
}
}
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3))
{
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, gradInput.getSize(1) + padT);
int hend = min(hstart + kH, gradInput.getSize(2) + padH);
int wend = min(wstart + kW, gradInput.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, gradInput.getSize(1));
hend = min(hend, gradInput.getSize(2));
wend = min(wend, gradInput.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
Dtype val = ScalarConvert<Acctype, Dtype>::to(
ScalarConvert<Dtype, Acctype>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
for (int iFrame = tstart; iFrame < tend; ++iFrame)
{
for (int iRow = hstart; iRow < hend; ++iRow)
{
for (int iCol = wstart; iCol < wend; ++iCol)
{
gradInput[slice][iFrame][iRow][iCol] = val;
}
}
}
}
}
#include "generic/VolumetricAveragePooling.cu"
#include "THHGenerateFloatTypes.h"
|
51e8bdfb3662c7ca8023c2c01e41a17a1feacee3.cu
|
#include "THCUNN.h"
#include "THCTensor.hpp"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateOutput(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oCol < output.getSize(3))
{
Acctype sum = 0.0;
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, input.getSize(1) + padT);
int hend = min(hstart + kH, input.getSize(2) + padH);
int wend = min(wstart + kW, input.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, input.getSize(1));
hend = min(hend, input.getSize(2));
wend = min(wend, input.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
int ti, hi, wi;
for (ti = tstart; ti < tend; ++ti)
{
for (hi = hstart; hi < hend; ++hi)
{
for (wi = wstart; wi < wend; ++wi)
{
Dtype val = input[slice][ti][hi][wi];
sum += val;
}
}
}
output[slice][oFrame][oRow][oCol] = ScalarConvert<Acctype, Dtype>::to(sum / divide_factor);
}
}
// Inner-most loop size (kW) passed as template parameter for
// performance reasons.
//
template<int KERNEL_WIDTH, typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateOutput_fixedKW(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int kT, int kH,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oCol < output.getSize(3))
{
Acctype sum = 0.0;
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, input.getSize(1) + padT);
int hend = min(hstart + kH, input.getSize(2) + padH);
int wend = min(wstart + KERNEL_WIDTH, input.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, input.getSize(1));
hend = min(hend, input.getSize(2));
wend = min(wend, input.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
int ti, hi, wi;
for (ti = tstart; ti < tend; ++ti)
{
for (hi = hstart; hi < hend; ++hi)
{
for (wi = wstart; wi < wend; ++wi)
{
Dtype val = input[slice][ti][hi][wi];
sum += val;
}
}
}
output[slice][oFrame][oRow][oCol] = ScalarConvert<Acctype, Dtype>::to(sum / divide_factor);
}
}
#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
cuda_VolumetricAveragePooling_updateOutput_fixedKW<KW, scalar_t, accreal> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
cudaInput, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); \
break
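// Editor's sketch (not part of the original file): the macro above is meant to be expanded
// inside a switch over the runtime kernel width, so that common widths get a compile-time
// constant (and hence unrollable) inner loop, while other widths fall back to the generic
// kernel. The actual dispatch lives in generic/VolumetricAveragePooling.cu (included below);
// the names `kW`, `grid`, `block`, `state`, `cudaInput` and `cudaOutput` are assumptions
// taken from the macro body, not code copied from that file.
//
//   switch (kW) {
//     LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
//     LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
//     LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
//     default:
//       cuda_VolumetricAveragePooling_updateOutput<scalar_t, accreal>
//         <<<grid, block, 0, THCState_getCurrentStream(state)>>>(
//           cudaInput, cudaOutput, kT, kH, kW, dT, dH, dW,
//           padT, padH, padW, count_include_pad, offsetZ);
//   }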
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput_Stride1(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
Acctype normFactor, int offsetZ)
{
int iCol = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.getSize(1); // input frame/time
int slice = (blockIdx.z + offsetZ) / gradInput.getSize(1); // input slice/feature
// guard against over-tiled threads
if (iRow < gradInput.getSize(2) && iCol < gradInput.getSize(3))
{
Acctype sum = 0.0;
Dtype *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)]
[max(0, iRow - kH + 1)][max(0, iCol - kW + 1)];
int frameOffset = 0;
for (int oFrame = max(0, iFrame - kT + 1);
oFrame < min(iFrame + 1, gradOutput.getSize(1));
++oFrame)
{
int rowOffset = frameOffset;
for (int oRow = max(0, iRow - kH + 1);
oRow < min(iRow + 1, gradOutput.getSize(2));
++oRow)
{
int colOffset = rowOffset;
for (int oCol = max(0, iCol - kW + 1);
oCol < min(iCol + 1, gradOutput.getSize(3));
++oCol)
{
sum += gOut[colOffset];
++colOffset;
}
rowOffset += gradOutput.getSize(3);
}
frameOffset += gradOutput.getSize(2) * gradOutput.getSize(3);
}
gradInput[slice][iFrame][iRow][iCol] = ScalarConvert<Acctype, Dtype>::to(sum * normFactor);
}
}
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput_atomicAdd(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3))
{
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, gradInput.getSize(1) + padT);
int hend = min(hstart + kH, gradInput.getSize(2) + padH);
int wend = min(wstart + kW, gradInput.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, gradInput.getSize(1));
hend = min(hend, gradInput.getSize(2));
wend = min(wend, gradInput.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
Dtype val = ScalarConvert<Acctype, Dtype>::to(
ScalarConvert<Dtype, Acctype>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
for (int iFrame = tstart; iFrame < tend; ++iFrame)
{
for (int iRow = hstart; iRow < hend; ++iRow)
{
for (int iCol = wstart; iCol < wend; ++iCol)
{
atomicAdd(&gradInput[slice][iFrame][iRow][iCol], val);
}
}
}
}
}
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3))
{
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, gradInput.getSize(1) + padT);
int hend = min(hstart + kH, gradInput.getSize(2) + padH);
int wend = min(wstart + kW, gradInput.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, gradInput.getSize(1));
hend = min(hend, gradInput.getSize(2));
wend = min(wend, gradInput.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
Dtype val = ScalarConvert<Acctype, Dtype>::to(
ScalarConvert<Dtype, Acctype>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
for (int iFrame = tstart; iFrame < tend; ++iFrame)
{
for (int iRow = hstart; iRow < hend; ++iRow)
{
for (int iCol = wstart; iCol < wend; ++iCol)
{
gradInput[slice][iFrame][iRow][iCol] = val;
}
}
}
}
}
#include "generic/VolumetricAveragePooling.cu"
#include "THCGenerateFloatTypes.h"
|
4efdc2891b2853fcb213607c6ec450638586140a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <patternvalidate.h>
// N is the maximum number of structs to insert
#define N 150000
__device__ Match dev_data[N];
__device__ int dev_count = 0;
__device__ PossibleMatches viable_data[N * 10];
__device__ int viable_count = 0;
__device__ int my_push_back(Match & mt) {
int insert_pt = atomicAdd(&dev_count, 1);
if (insert_pt < N) {
dev_data[insert_pt] = mt;
return insert_pt;
}
else return -1;
}
__device__ int insert_viable_pattern(PossibleMatches & mt) {
int insert_pt = atomicAdd(&viable_count, 1);
if (insert_pt < N * 10) {
viable_data[insert_pt] = mt;
return insert_pt;
}
else return -1;
}
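// Note (editor's comment, not part of the original file): my_push_back() and
// insert_viable_pattern() implement a simple device-side append. atomicAdd on a global
// counter reserves a unique slot, the struct is written into the fixed-capacity buffer
// (N, resp. N * 10 entries), and -1 signals overflow. The host later reads the counters
// back with hipMemcpyFromSymbol to learn how many entries are valid.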
__global__ void checkpatternkernal(int img_width, int img_height, unsigned char *img_data, unsigned char *frequency, int max)
{
int findwdth = blockIdx.y * blockDim.y + threadIdx.y;
int findhght = blockIdx.x * blockDim.x + threadIdx.x;
if (findwdth < img_width && findhght < img_height) {
if (!(findhght > img_height / 4 && findwdth > img_width / 4) && (findhght < img_height && findwdth < img_width) && findhght > img_height / 8 && findwdth > img_width / 8) {
for (int i = 0; i < img_height; i+=(findhght/2))
{
for (int j = 0; j < img_width; j+=(findwdth/2))
{
int offset_rgb = i*img_width * 3 + j * 3;
//find position of pattern to be tested
if (((offset_rgb + findhght*img_width * 3) / (img_width * 3)) == ((offset_rgb + findhght*img_width * 3 + findwdth * 3) / (img_width * 3))
&& (offset_rgb + findhght*img_width * 3 + findwdth * 3 < img_height*img_width * 3 ) && (i*img_width + j +findhght*img_width+findwdth<img_height*img_width) ) {
int sidepixel = 100000;
int similarpixel = 0;
/*for (int ii = 0; ii < findhght; i++) {
int offset_gray = i*img_width + j + ii*img_width + 0;
int offset_gray2 = i*img_width + j + ii*img_width + (findwdth - 1);
for (int k = 0; k < max; k++) {
if (img_data[offset_gray] - frequency[k] <= 10 && -10 <= img_data[offset_gray] - frequency[k]) {
sidepixel++;
break;
}
}
for (int k = 0; k < max; k++) {
if (img_data[offset_gray2] - frequency[k] <= 10 && -10 <= img_data[offset_gray2] - frequency[k]) {
sidepixel++;
break;
}
}
}
for (int jj = 0; jj < findwdth; jj++) {
int offset_gray = i*img_width + j + 0*img_width + jj;
int offset_gray2 = i*img_width + j + (findhght - 1)*img_width + jj;
for (int k = 0; k < max; k++) {
if (img_data[offset_gray] - frequency[k] <= 10 && -10 <= img_data[offset_gray] - frequency[k]) {
sidepixel++;
break;
}
}
for (int k = 0; k < max; k++) {
if (img_data[offset_gray2] - frequency[k] <= 10 && -10 <= img_data[offset_gray2] - frequency[k]) {
sidepixel++;
break;
}
}
}*/
if (sidepixel >= (findhght + findwdth)*0.1) {
for (int ii = 0; ii < findhght; ii++) {
for (int jj = 0; jj < findwdth; jj++) {
int offset_gray = i*img_width + j + ii*img_width + jj;
for (int k = 0; k < max; k++) {
if (img_data[offset_gray] - frequency[k] <= 6 && -6 <= img_data[offset_gray] - frequency[k]) {
similarpixel++;
break;
}
}
}
}
}
if (similarpixel >= findwdth * findhght * 0.35) {
PossibleMatches temp;
temp.startpos = offset_rgb;
temp.findhght = findhght;
temp.findwdth = findwdth;
insert_viable_pattern(temp);
}
}
}
}
}
}
}
__global__ void patternkernal(int img_width, int img_height, PossibleMatches *patterns, unsigned char *rgb, int max)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < max) {
bool samerow = false;
double pixeldiffcount = 0;
int occurences = 0;
int startpos = patterns[index].startpos;
int findwdth = patterns[index].findwdth;
int findhght = patterns[index].findhght;
//find occurences in image
for (int ii = 0; ii < img_height; ii += (int)img_height / 33)
{
samerow = false;
for (int jj = 0; jj < img_width; jj += (int)img_width / 33)
{
int offset_rgb2 = ii*img_width * 3 + jj * 3;
pixeldiffcount = 0;
if (offset_rgb2 + (findhght + 1)*img_width * 3 + (findwdth + 1) * 3 < img_height*img_width * 3 &&
((offset_rgb2 + findhght*img_width * 3) / (img_width * 3) ==
(offset_rgb2 + findhght*img_width * 3 + findwdth * 3) / (img_width * 3))) {
//search within image
for (int dwn = 0; dwn < findhght; dwn++) {
for (int rght = 0; rght < findwdth; rght++) {
unsigned char tr = rgb[offset_rgb2 + dwn*img_width * 3 + rght * 3 + 0];
unsigned char tg = rgb[offset_rgb2 + dwn*img_width * 3 + rght * 3 + 1];
unsigned char tb = rgb[offset_rgb2 + dwn*img_width * 3 + rght * 3 + 2];
unsigned char pr = rgb[startpos + dwn*img_width * 3 + rght * 3 + 0];
unsigned char pg = rgb[startpos + dwn*img_width * 3 + rght * 3 + 1];
unsigned char pb = rgb[startpos + dwn*img_width * 3 + rght * 3 + 2];
pixeldiffcount += (sqrtf(((tr - pr) * (tr - pr) + (tg - pg) * (tg - pg) + (tb - pb) * (tb - pb))));
if (pixeldiffcount > (findhght*findwdth) * 255 * 0.4f)
break;
}
if (pixeldiffcount > (findhght*findwdth) * 255 * 0.4f)
break;
/*if (!sameline)
break;*/
}
//printf("%d\n", samepixel);
if (pixeldiffcount <= (findhght*findwdth) * 255 * 0.2f) {
occurences++;
jj += (findwdth - 1);
if (!samerow)
samerow = true;
}
else if (pixeldiffcount > findhght*findwdth * 255 * 0.4f && findwdth < img_width / 2)
jj += (findwdth / 4);
}
}
if (samerow)
ii += (findhght - 1);
}
//printf("2\n");
//__syncthreads();
if (occurences >= 4) {
Match temp;
temp.startpos = startpos;
temp.width = findwdth;
temp.height = findhght;
temp.times = occurences;
my_push_back(temp);
}
}
}
extern "C" void checkpatterns(
int img_width,
int img_height,
unsigned char *img_data,
vector<unsigned char>& frequency,
vector<PossibleMatches>& output)
{
int size;
hipError_t error;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
unsigned char *dimg_data;
unsigned char *dfrequency;
error = hipMalloc((void **)&dimg_data, img_width*img_height * sizeof(unsigned char));
error = hipMalloc((void **)&dfrequency, sizeof(unsigned char) * frequency.size()); // frequency holds unsigned char values, not PossibleMatches structs
if (error != hipSuccess)
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
error = hipMemcpy(dimg_data, img_data, img_width*img_height * sizeof(unsigned char), hipMemcpyHostToDevice);
error = hipMemcpy(dfrequency, &*frequency.begin(), sizeof(unsigned char) * frequency.size(), hipMemcpyHostToDevice);
int max = frequency.size();
int block_x_dim = 2000;
int block_y_dim = 2000;
dim3 threads_per_block((img_width + 1999) / 2000, (img_height + 1999) / 2000);
dim3 blocks_per_dimension(block_x_dim, block_y_dim);
checkpatternkernal << < blocks_per_dimension, threads_per_block >> > (img_width, img_height, dimg_data, dfrequency, max);
hipDeviceSynchronize();
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess)
printf("CUDA error: %s\n", hipGetErrorString(error));
printf("stopped\n");
hipMemcpyFromSymbol(&size, viable_count, sizeof(int));
if (size >= N * 10) { printf("overflow error\n"); size = 150000; }
vector<PossibleMatches> results(size);
if (size != 0)
hipMemcpyFromSymbol(&(results[0]), viable_data, size * sizeof(PossibleMatches));
//hipFree(dev_data);
printf("first: %d\n", size);
output = results;
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("first: %.7f\n", milliseconds);
hipFree(dimg_data);
hipFree(dfrequency);
hipDeviceSynchronize();
}
extern "C" void externalfunction(
int img_width,
int img_height,
unsigned char *rgb,
vector<PossibleMatches>& input,
vector<Match>& output)
{
hipError_t error;
hipEvent_t start1, stop1;
hipEventCreate(&start1);
hipEventCreate(&stop1);
int size;
unsigned char *drgb;
PossibleMatches *dinput;
error = hipMalloc((void **)&drgb, img_width*img_height * 3 * sizeof(unsigned char));
error = hipMalloc((void **)&dinput, sizeof(PossibleMatches) * input.size());
if (error != hipSuccess)
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
error = hipMemcpy(drgb, rgb, img_width*img_height * 3 * sizeof(unsigned char), hipMemcpyHostToDevice);
error = hipMemcpy(dinput, &*input.begin(), sizeof(PossibleMatches) * input.size(), hipMemcpyHostToDevice);
if (error != hipSuccess)
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
/*int block_x_dim = 2000;
int block_y_dim = 2000;
dim3 threads_per_block((img_width + 1999) / 2000, (img_height + 1999) / 2000);
dim3 blocks_per_dimension(block_x_dim, block_y_dim);*/
printf("%d\n", img_width);
printf("%d\n", img_height);
int max = input.size();
hipEventRecord(start1);
patternkernal << <(max + 1024 - 1) / 1024, 1024 >> > (img_width, img_height, dinput, drgb, max);
hipDeviceSynchronize();
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess)
printf("CUDA error: %s\n", hipGetErrorString(error));
printf("stopped\n");
hipMemcpyFromSymbol(&size, dev_count, sizeof(int));
if (size >= N) { printf("overflow error\n"); size = 15000; }
vector<Match> results(size);
if (size != 0)
hipMemcpyFromSymbol(&(results[0]), dev_data, size * sizeof(Match));
//hipFree(dev_data);
printf("%d\n", size);
output = results;
hipEventRecord(stop1);
hipEventSynchronize(stop1);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start1, stop1);
printf("%.7f\n", milliseconds);
hipFree(drgb);
hipDeviceReset();
}
|
4efdc2891b2853fcb213607c6ec450638586140a.cu
|
#include <patternvalidate.h>
// N is the maximum number of structs to insert
#define N 150000
__device__ Match dev_data[N];
__device__ int dev_count = 0;
__device__ PossibleMatches viable_data[N * 10];
__device__ int viable_count = 0;
__device__ int my_push_back(Match & mt) {
int insert_pt = atomicAdd(&dev_count, 1);
if (insert_pt < N) {
dev_data[insert_pt] = mt;
return insert_pt;
}
else return -1;
}
__device__ int insert_viable_pattern(PossibleMatches & mt) {
int insert_pt = atomicAdd(&viable_count, 1);
if (insert_pt < N * 10) {
viable_data[insert_pt] = mt;
return insert_pt;
}
else return -1;
}
__global__ void checkpatternkernal(int img_width, int img_height, unsigned char *img_data, unsigned char *frequency, int max)
{
int findwdth = blockIdx.y * blockDim.y + threadIdx.y;
int findhght = blockIdx.x * blockDim.x + threadIdx.x;
if (findwdth < img_width && findhght < img_height) {
if (!(findhght > img_height / 4 && findwdth > img_width / 4) && (findhght < img_height && findwdth < img_width) && findhght > img_height / 8 && findwdth > img_width / 8) {
for (int i = 0; i < img_height; i+=(findhght/2))
{
for (int j = 0; j < img_width; j+=(findwdth/2))
{
int offset_rgb = i*img_width * 3 + j * 3;
//find position of pattern to be tested
if (((offset_rgb + findhght*img_width * 3) / (img_width * 3)) == ((offset_rgb + findhght*img_width * 3 + findwdth * 3) / (img_width * 3))
&& (offset_rgb + findhght*img_width * 3 + findwdth * 3 < img_height*img_width * 3 ) && (i*img_width + j +findhght*img_width+findwdth<img_height*img_width) ) {
int sidepixel = 100000;
int similarpixel = 0;
/*for (int ii = 0; ii < findhght; i++) {
int offset_gray = i*img_width + j + ii*img_width + 0;
int offset_gray2 = i*img_width + j + ii*img_width + (findwdth - 1);
for (int k = 0; k < max; k++) {
if (img_data[offset_gray] - frequency[k] <= 10 && -10 <= img_data[offset_gray] - frequency[k]) {
sidepixel++;
break;
}
}
for (int k = 0; k < max; k++) {
if (img_data[offset_gray2] - frequency[k] <= 10 && -10 <= img_data[offset_gray2] - frequency[k]) {
sidepixel++;
break;
}
}
}
for (int jj = 0; jj < findwdth; jj++) {
int offset_gray = i*img_width + j + 0*img_width + jj;
int offset_gray2 = i*img_width + j + (findhght - 1)*img_width + jj;
for (int k = 0; k < max; k++) {
if (img_data[offset_gray] - frequency[k] <= 10 && -10 <= img_data[offset_gray] - frequency[k]) {
sidepixel++;
break;
}
}
for (int k = 0; k < max; k++) {
if (img_data[offset_gray2] - frequency[k] <= 10 && -10 <= img_data[offset_gray2] - frequency[k]) {
sidepixel++;
break;
}
}
}*/
if (sidepixel >= (findhght + findwdth)*0.1) {
for (int ii = 0; ii < findhght; ii++) {
for (int jj = 0; jj < findwdth; jj++) {
int offset_gray = i*img_width + j + ii*img_width + jj;
for (int k = 0; k < max; k++) {
if (img_data[offset_gray] - frequency[k] <= 6 && -6 <= img_data[offset_gray] - frequency[k]) {
similarpixel++;
break;
}
}
}
}
}
if (similarpixel >= findwdth * findhght * 0.35) {
PossibleMatches temp;
temp.startpos = offset_rgb;
temp.findhght = findhght;
temp.findwdth = findwdth;
insert_viable_pattern(temp);
}
}
}
}
}
}
}
__global__ void patternkernal(int img_width, int img_height, PossibleMatches *patterns, unsigned char *rgb, int max)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < max) {
bool samerow = false;
double pixeldiffcount = 0;
int occurences = 0;
int startpos = patterns[index].startpos;
int findwdth = patterns[index].findwdth;
int findhght = patterns[index].findhght;
//find occurences in image
for (int ii = 0; ii < img_height; ii += (int)img_height / 33)
{
samerow = false;
for (int jj = 0; jj < img_width; jj += (int)img_width / 33)
{
int offset_rgb2 = ii*img_width * 3 + jj * 3;
pixeldiffcount = 0;
if (offset_rgb2 + (findhght + 1)*img_width * 3 + (findwdth + 1) * 3 < img_height*img_width * 3 &&
((offset_rgb2 + findhght*img_width * 3) / (img_width * 3) ==
(offset_rgb2 + findhght*img_width * 3 + findwdth * 3) / (img_width * 3))) {
//search within image
for (int dwn = 0; dwn < findhght; dwn++) {
for (int rght = 0; rght < findwdth; rght++) {
unsigned char tr = rgb[offset_rgb2 + dwn*img_width * 3 + rght * 3 + 0];
unsigned char tg = rgb[offset_rgb2 + dwn*img_width * 3 + rght * 3 + 1];
unsigned char tb = rgb[offset_rgb2 + dwn*img_width * 3 + rght * 3 + 2];
unsigned char pr = rgb[startpos + dwn*img_width * 3 + rght * 3 + 0];
unsigned char pg = rgb[startpos + dwn*img_width * 3 + rght * 3 + 1];
unsigned char pb = rgb[startpos + dwn*img_width * 3 + rght * 3 + 2];
pixeldiffcount += (sqrtf(((tr - pr) * (tr - pr) + (tg - pg) * (tg - pg) + (tb - pb) * (tb - pb))));
if (pixeldiffcount > (findhght*findwdth) * 255 * 0.4f)
break;
}
if (pixeldiffcount > (findhght*findwdth) * 255 * 0.4f)
break;
/*if (!sameline)
break;*/
}
//printf("%d\n", samepixel);
if (pixeldiffcount <= (findhght*findwdth) * 255 * 0.2f) {
occurences++;
jj += (findwdth - 1);
if (!samerow)
samerow = true;
}
else if (pixeldiffcount > findhght*findwdth * 255 * 0.4f && findwdth < img_width / 2)
jj += (findwdth / 4);
}
}
if (samerow)
ii += (findhght - 1);
}
//printf("2\n");
//__syncthreads();
if (occurences >= 4) {
Match temp;
temp.startpos = startpos;
temp.width = findwdth;
temp.height = findhght;
temp.times = occurences;
my_push_back(temp);
}
}
}
extern "C" void checkpatterns(
int img_width,
int img_height,
unsigned char *img_data,
vector<unsigned char>& frequency,
vector<PossibleMatches>& output)
{
int size;
cudaError_t error;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
unsigned char *dimg_data;
unsigned char *dfrequency;
error = cudaMalloc((void **)&dimg_data, img_width*img_height * sizeof(unsigned char));
error = cudaMalloc((void **)&dfrequency, sizeof(unsigned char) * frequency.size()); // frequency holds unsigned char values, not PossibleMatches structs
if (error != cudaSuccess)
printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
error = cudaMemcpy(dimg_data, img_data, img_width*img_height * sizeof(unsigned char), cudaMemcpyHostToDevice);
error = cudaMemcpy(dfrequency, &*frequency.begin(), sizeof(unsigned char) * frequency.size(), cudaMemcpyHostToDevice);
int max = frequency.size();
int block_x_dim = 2000;
int block_y_dim = 2000;
dim3 threads_per_block((img_width + 1999) / 2000, (img_height + 1999) / 2000);
dim3 blocks_per_dimension(block_x_dim, block_y_dim);
checkpatternkernal << < blocks_per_dimension, threads_per_block >> > (img_width, img_height, dimg_data, dfrequency, max);
cudaDeviceSynchronize();
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess)
printf("CUDA error: %s\n", cudaGetErrorString(error));
printf("stopped\n");
cudaMemcpyFromSymbol(&size, viable_count, sizeof(int));
if (size >= N * 10) { printf("overflow error\n"); size = 150000; }
vector<PossibleMatches> results(size);
if (size != 0)
cudaMemcpyFromSymbol(&(results[0]), viable_data, size * sizeof(PossibleMatches));
//cudaFree(dev_data);
printf("first: %d\n", size);
output = results;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("first: %.7f\n", milliseconds);
cudaFree(dimg_data);
cudaFree(dfrequency);
cudaDeviceSynchronize();
}
extern "C" void externalfunction(
int img_width,
int img_height,
unsigned char *rgb,
vector<PossibleMatches>& input,
vector<Match>& output)
{
cudaError_t error;
cudaEvent_t start1, stop1;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
int size;
unsigned char *drgb;
PossibleMatches *dinput;
error = cudaMalloc((void **)&drgb, img_width*img_height * 3 * sizeof(unsigned char));
error = cudaMalloc((void **)&dinput, sizeof(PossibleMatches) * input.size());
if (error != cudaSuccess)
printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
error = cudaMemcpy(drgb, rgb, img_width*img_height * 3 * sizeof(unsigned char), cudaMemcpyHostToDevice);
error = cudaMemcpy(dinput, &*input.begin(), sizeof(PossibleMatches) * input.size(), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
/*int block_x_dim = 2000;
int block_y_dim = 2000;
dim3 threads_per_block((img_width + 1999) / 2000, (img_height + 1999) / 2000);
dim3 blocks_per_dimension(block_x_dim, block_y_dim);*/
printf("%d\n", img_width);
printf("%d\n", img_height);
int max = input.size();
cudaEventRecord(start1);
patternkernal << <(max + 1024 - 1) / 1024, 1024 >> > (img_width, img_height, dinput, drgb, max);
cudaDeviceSynchronize();
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess)
printf("CUDA error: %s\n", cudaGetErrorString(error));
printf("stopped\n");
cudaMemcpyFromSymbol(&size, dev_count, sizeof(int));
if (size >= N) { printf("overflow error\n"); size = 15000; }
vector<Match> results(size);
if (size != 0)
cudaMemcpyFromSymbol(&(results[0]), dev_data, size * sizeof(Match));
//cudaFree(dev_data);
printf("%d\n", size);
output = results;
cudaEventRecord(stop1);
cudaEventSynchronize(stop1);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start1, stop1);
printf("%.7f\n", milliseconds);
cudaFree(drgb);
cudaDeviceReset();
}
|
e1026f3bf273a325427f5811ceb18e5ae8ac323e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by developer on 5/24/20.
//
#include "saxpy.cuh"
__global__
void run(int n, float a, float *d_x, float *d_y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) d_y[i] = a * d_x[i] + d_y[i];
}
void saxpy(int N, float a, float *x, float *y)
{
float *d_x, *d_y;
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on N elements
hipLaunchKernelGGL(( run), dim3((N+255)/256), dim3(256), 0, 0, N, a, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_y);
}
|
e1026f3bf273a325427f5811ceb18e5ae8ac323e.cu
|
//
// Created by developer on 5/24/20.
//
#include "saxpy.cuh"
__global__
void run(int n, float a, float *d_x, float *d_y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) d_y[i] = a * d_x[i] + d_y[i];
}
void saxpy(int N, float a, float *x, float *y)
{
float *d_x, *d_y;
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on N elements
run<<<(N+255)/256, 256>>>(N, a, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
}
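// --- Illustrative usage sketch (editor's addition, not part of the original file) ---
// A minimal host-side driver for the saxpy() defined above; the problem size and the
// scalar are arbitrary demonstration values.
#include <cstdio>
#include <vector>
int main() {
    const int N = 1 << 20;           // one million elements
    const float a = 2.0f;
    std::vector<float> x(N, 1.0f);   // x[i] = 1
    std::vector<float> y(N, 2.0f);   // y[i] = 2
    saxpy(N, a, x.data(), y.data()); // y <- a*x + y, computed on the GPU
    // every element should now be 2*1 + 2 = 4
    std::printf("y[0] = %f, y[N-1] = %f\n", y[0], y[N - 1]);
    return 0;
}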
|
1507c0d6d9979b7095ff97e544cbc0d4097f0007.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* This is the sequential version of the task
 * Apart from each kernel looping over all the candidates, the code is the
 * same as the GPU version, just messier and undocumented.
*
* For understanding the code please see ../gpu/main.cu
 * Even though these scripts are pretty much the same, they were kept separate
* for ease of experimentation.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "constants_cpu.c"
#include "utils_cpu.h"
void init(hiprandGenerator_t gen, float* states) {
hiprandCreateGeneratorHost(&gen,
HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, rand());
hiprandGenerateUniform(gen, states, ISLANDS);
hiprandDestroyGenerator(gen);
}
void getPopulationFitness(int* population_d, float* population_cost_d, float* population_fitness_d, float* citymap_d) {
for(int i=0;i<ISLANDS;i++)
evaluateRoute(population_d, population_cost_d, population_fitness_d, citymap_d, i);
}
void mutation(int* population_d, float* population_cost_d, float* population_fitness_d, float* states_1, float* states_2) {
//generating new set of random nums for randNum2
for(int tid=0; tid< ISLANDS; tid++) {
if (states_1[tid] < mutation_ratio) {
// This gives better score than using Random
int randNum1 = 1 + states_1[tid] * (num_cities - 1.0000001);
int randNum2 = 1 + states_2[tid] * (num_cities - 1.0000001);
//printf("%d %d\n", randNum1, randNum2);
int city_temp = population_d[tid*num_cities + randNum1];
population_d[tid*num_cities + randNum1] = population_d[tid*num_cities + randNum2];
population_d[tid*num_cities + randNum2] = city_temp;
}
}
}
void crossover(int* population_d, float* population_cost_d,
float* population_fitness_d, int* parent_cities_d, float* citymap_d, int index) {
// Get thread (particle) ID
for(int tid=0; tid< ISLANDS; tid++) {
population_d[tid*num_cities] = parent_cities_d[tid* (2*num_cities)];
int parent_city_ptr[num_cities];
for(int i=0; i<num_cities;i++)
parent_city_ptr[i] = parent_cities_d[tid*num_cities*2 + i];
int tourarray[num_cities];
for(int i=0; i<num_cities;i++)
tourarray[i] = population_d[tid*num_cities + i];
int current_city_id = population_d[tid*num_cities + index - 1];
int c1 = getValidNextCity(parent_city_ptr, tourarray, current_city_id, index);
for(int i=0; i<num_cities;i++)
parent_city_ptr[i] = parent_cities_d[tid*num_cities*2+num_cities + i];
int c2 = getValidNextCity(parent_city_ptr, tourarray, current_city_id, index);
if(citymap_d[c1*num_cities + current_city_id] <= citymap_d[c2*num_cities + current_city_id])
population_d[tid*num_cities + index] = c1;
else
population_d[tid*num_cities + index] = c2;
}
}
int* tournamentSelection(int* population_d, float* population_cost_d,
float* population_fitness_d, float* states_d, int tid) {
int tournament[tournament_size*num_cities];
float tournament_fitness[tournament_size];
float tournament_cost[tournament_size];
int randNum;
for (int i = 0; i < tournament_size; i++) {
randNum = states_d[i] * (ISLANDS - 1);
//printf("%d %d\n", states_d[tid], randNum);
for(int c=0; c<num_cities; c++) {
tournament[i*num_cities + c] = population_d[randNum*num_cities + c];
tournament_cost[i] = population_cost_d[randNum];
tournament_fitness[i] = population_fitness_d[randNum];
}
}
int fittest = getFittestTourIndex(tournament, tournament_cost, tournament_fitness);
// static: returning a pointer to a function-local array would dangle; the caller copies
// the route out before the next call, so one reused buffer is safe in this CPU version.
static int fittest_route[num_cities];
for(int c=0; c<num_cities; c++) {
fittest_route[c] = tournament[fittest*num_cities + c];
}
return fittest_route;
}
void selection(
int* population_d, float* population_cost_d,
float* population_fitness_d, int* parent_cities_d, float* states_1, float* states_2) {
int* parent1;
/*
if(ELITISM && (blockIdx.x == 0)) {
int fittest = getFittestTourIndex(population_d, population_cost_d, population_fitness_d);
for(int c=0; c<num_cities; c++) {
parent_cities_d[tid* (2*num_cities) +c] = population_d[fittest*num_cities + c];
parent_cities_d[tid* (2*num_cities) +num_cities +c] = population_d[fittest*num_cities + c];
}
} else {
*/
for(int tid=0; tid< ISLANDS; tid++) {
parent1 = tournamentSelection(population_d, population_cost_d,
population_fitness_d, states_1, tid);
for(int c=0; c<num_cities; c++)
parent_cities_d[tid* (2*num_cities) +c] = parent1[c];
parent1 = tournamentSelection(population_d, population_cost_d,
population_fitness_d, states_2, tid);
for(int c=0; c<num_cities; c++)
parent_cities_d[tid* (2*num_cities) +num_cities +c] = parent1[c];
}
//}
}
int main() {
int max_val = 250;
float citymap[num_cities*num_cities];
int* population = (int*)calloc(ISLANDS*num_cities, sizeof(int));
float* population_fitness = (float*)calloc(ISLANDS, sizeof(float));
float* population_cost = (float*)calloc(ISLANDS, sizeof(float));
int* parent_cities = (int*)calloc(ISLANDS*num_cities*2, sizeof(int));
float* states = (float*)calloc(ISLANDS, sizeof(float));
float* states_2 = (float*)calloc(ISLANDS, sizeof(float));
hiprandGenerator_t gen;
printf("Num islands: %d\n", ISLANDS);
printf("Population size: %d\n", ISLANDS*num_cities);
//building cost table
for(int i=0; i<num_cities; i++) {
for(int j=0; j<num_cities; j++) {
if(i!=j) {
citymap[i*num_cities+j] = L2distance(city_x[i], city_y[i], city_x[j], city_y[j]);
} else {
citymap[i*num_cities+j] = max_val * max_val;
}
}
}
initalizeRandomPopulation(population, population_cost, population_fitness, citymap);
int fittest = getFittestScore(population_fitness);
printf("min distance: %f\n", population_cost[fittest]);
float milliseconds;
clock_t start, end;
start = clock();
for(int i = 0; i < num_generations; i++ ) {
init(gen, states);
init(gen, states_2);
selection(
population, population_cost, population_fitness, parent_cities, states, states_2);
for (int j = 1; j < num_cities; j++)
crossover(population, population_cost, population_fitness, parent_cities, citymap, j);
mutation(population, population_cost, population_fitness, states, states_2);
getPopulationFitness(
population, population_cost, population_fitness, citymap);
if(i>0 && i % print_interval == 0) {
fittest = getFittestScore(population_fitness);
printf("Iteration:%d, min distance: %f\n", i, population_cost[fittest]);
}
//printf("---------------\n");
}
end = clock();
milliseconds = ((double) (end - start)) / CLOCKS_PER_SEC;
fittest = getFittestScore(population_fitness);
printf("time: %f, min distance: %f\n", milliseconds, population_cost[fittest]);
return 0;
}
|
1507c0d6d9979b7095ff97e544cbc0d4097f0007.cu
|
/*
* This is the sequential version of the task
 * Apart from each kernel looping over all the candidates, the code is the
 * same as the GPU version, just messier and undocumented.
*
* For understanding the code please see ../gpu/main.cu
 * Even though these scripts are pretty much the same, they were kept separate
* for ease of experimentation.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#include "constants_cpu.c"
#include "utils_cpu.h"
void init(curandGenerator_t gen, float* states) {
curandCreateGeneratorHost(&gen,
CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, rand());
curandGenerateUniform(gen, states, ISLANDS);
curandDestroyGenerator(gen);
}
void getPopulationFitness(int* population_d, float* population_cost_d, float* population_fitness_d, float* citymap_d) {
for(int i=0;i<ISLANDS;i++)
evaluateRoute(population_d, population_cost_d, population_fitness_d, citymap_d, i);
}
void mutation(int* population_d, float* population_cost_d, float* population_fitness_d, float* states_1, float* states_2) {
//generating new set of random nums for randNum2
for(int tid=0; tid< ISLANDS; tid++) {
if (states_1[tid] < mutation_ratio) {
// This gives better score than using Random
int randNum1 = 1 + states_1[tid] * (num_cities - 1.0000001);
int randNum2 = 1 + states_2[tid] * (num_cities - 1.0000001);
//printf("%d %d\n", randNum1, randNum2);
int city_temp = population_d[tid*num_cities + randNum1];
population_d[tid*num_cities + randNum1] = population_d[tid*num_cities + randNum2];
population_d[tid*num_cities + randNum2] = city_temp;
}
}
}
void crossover(int* population_d, float* population_cost_d,
float* population_fitness_d, int* parent_cities_d, float* citymap_d, int index) {
// Get thread (particle) ID
for(int tid=0; tid< ISLANDS; tid++) {
population_d[tid*num_cities] = parent_cities_d[tid* (2*num_cities)];
int parent_city_ptr[num_cities];
for(int i=0; i<num_cities;i++)
parent_city_ptr[i] = parent_cities_d[tid*num_cities*2 + i];
int tourarray[num_cities];
for(int i=0; i<num_cities;i++)
tourarray[i] = population_d[tid*num_cities + i];
int current_city_id = population_d[tid*num_cities + index - 1];
int c1 = getValidNextCity(parent_city_ptr, tourarray, current_city_id, index);
for(int i=0; i<num_cities;i++)
parent_city_ptr[i] = parent_cities_d[tid*num_cities*2+num_cities + i];
int c2 = getValidNextCity(parent_city_ptr, tourarray, current_city_id, index);
if(citymap_d[c1*num_cities + current_city_id] <= citymap_d[c2*num_cities + current_city_id])
population_d[tid*num_cities + index] = c1;
else
population_d[tid*num_cities + index] = c2;
}
}
int* tournamentSelection(int* population_d, float* population_cost_d,
float* population_fitness_d, float* states_d, int tid) {
int tournament[tournament_size*num_cities];
float tournament_fitness[tournament_size];
float tournament_cost[tournament_size];
int randNum;
for (int i = 0; i < tournament_size; i++) {
randNum = states_d[i] * (ISLANDS - 1);
//printf("%d %d\n", states_d[tid], randNum);
for(int c=0; c<num_cities; c++) {
tournament[i*num_cities + c] = population_d[randNum*num_cities + c];
tournament_cost[i] = population_cost_d[randNum];
tournament_fitness[i] = population_fitness_d[randNum];
}
}
int fittest = getFittestTourIndex(tournament, tournament_cost, tournament_fitness);
// static: returning a pointer to a function-local array would dangle; the caller copies
// the route out before the next call, so one reused buffer is safe in this CPU version.
static int fittest_route[num_cities];
for(int c=0; c<num_cities; c++) {
fittest_route[c] = tournament[fittest*num_cities + c];
}
return fittest_route;
}
void selection(
int* population_d, float* population_cost_d,
float* population_fitness_d, int* parent_cities_d, float* states_1, float* states_2) {
int* parent1;
/*
if(ELITISM && (blockIdx.x == 0)) {
int fittest = getFittestTourIndex(population_d, population_cost_d, population_fitness_d);
for(int c=0; c<num_cities; c++) {
parent_cities_d[tid* (2*num_cities) +c] = population_d[fittest*num_cities + c];
parent_cities_d[tid* (2*num_cities) +num_cities +c] = population_d[fittest*num_cities + c];
}
} else {
*/
for(int tid=0; tid< ISLANDS; tid++) {
parent1 = tournamentSelection(population_d, population_cost_d,
population_fitness_d, states_1, tid);
for(int c=0; c<num_cities; c++)
parent_cities_d[tid* (2*num_cities) +c] = parent1[c];
parent1 = tournamentSelection(population_d, population_cost_d,
population_fitness_d, states_2, tid);
for(int c=0; c<num_cities; c++)
parent_cities_d[tid* (2*num_cities) +num_cities +c] = parent1[c];
}
//}
}
int main() {
int max_val = 250;
float citymap[num_cities*num_cities];
int* population = (int*)calloc(ISLANDS*num_cities, sizeof(int));
float* population_fitness = (float*)calloc(ISLANDS, sizeof(float));
float* population_cost = (float*)calloc(ISLANDS, sizeof(float));
int* parent_cities = (int*)calloc(ISLANDS*num_cities*2, sizeof(int));
float* states = (float*)calloc(ISLANDS, sizeof(float));
float* states_2 = (float*)calloc(ISLANDS, sizeof(float));
curandGenerator_t gen;
printf("Num islands: %d\n", ISLANDS);
printf("Population size: %d\n", ISLANDS*num_cities);
//building cost table
for(int i=0; i<num_cities; i++) {
for(int j=0; j<num_cities; j++) {
if(i!=j) {
citymap[i*num_cities+j] = L2distance(city_x[i], city_y[i], city_x[j], city_y[j]);
} else {
citymap[i*num_cities+j] = max_val * max_val;
}
}
}
initalizeRandomPopulation(population, population_cost, population_fitness, citymap);
int fittest = getFittestScore(population_fitness);
printf("min distance: %f\n", population_cost[fittest]);
float milliseconds;
clock_t start, end;
start = clock();
for(int i = 0; i < num_generations; i++ ) {
init(gen, states);
init(gen, states_2);
selection(
population, population_cost, population_fitness, parent_cities, states, states_2);
for (int j = 1; j < num_cities; j++)
crossover(population, population_cost, population_fitness, parent_cities, citymap, j);
mutation(population, population_cost, population_fitness, states, states_2);
getPopulationFitness(
population, population_cost, population_fitness, citymap);
if(i>0 && i % print_interval == 0) {
fittest = getFittestScore(population_fitness);
printf("Iteration:%d, min distance: %f\n", i, population_cost[fittest]);
}
//printf("---------------\n");
}
end = clock();
milliseconds = ((double) (end - start)) / CLOCKS_PER_SEC;
fittest = getFittestScore(population_fitness);
printf("time: %f, min distance: %f\n", milliseconds, population_cost[fittest]);
return 0;
}
|
b86566bc96f9d0b733ab258e3d9774f795652e34.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/replace/nulls.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/replace.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/replace.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace { // anonymous
static constexpr int BLOCK_SIZE = 256;
template <int phase, bool replacement_has_nulls>
__global__ void replace_nulls_strings(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::bitmask_type* output_valid,
cudf::size_type* offsets,
char* chars,
cudf::size_type* valid_counter)
{
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (replacement_has_nulls && !input_is_valid) {
output_is_valid = replacement.is_valid_nocheck(i);
}
cudf::string_view out;
if (input_is_valid) {
out = input.element<cudf::string_view>(i);
} else if (output_is_valid) {
out = replacement.element<cudf::string_view>(i);
}
bool nonzero_output = (input_is_valid || output_is_valid);
if (phase == 0) {
offsets[i] = nonzero_output ? out.size_bytes() : 0;
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(i)] = bitmask;
valid_sum += __popc(bitmask);
}
} else if (phase == 1) {
if (nonzero_output) std::memcpy(chars + offsets[i], out.data(), out.size_bytes());
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(valid_counter, block_valid_count); }
}
template <typename Type, bool replacement_has_nulls>
__global__ void replace_nulls(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view output,
cudf::size_type* output_valid_count)
{
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (input_is_valid) {
output.data<Type>()[i] = input.element<Type>(i);
} else {
if (replacement_has_nulls) { output_is_valid = replacement.is_valid_nocheck(i); }
output.data<Type>()[i] = replacement.element<Type>(i);
}
/* output valid counts calculations*/
if (replacement_has_nulls) {
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(i), bitmask);
valid_sum += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if (replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count =
cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
}
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
struct replace_nulls_column_kernel_forwarder {
template <typename col_type, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<col_type>())>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::size_type nrows = input.size();
cudf::detail::grid_1d grid{nrows, BLOCK_SIZE};
auto output =
cudf::detail::allocate_like(input,
input.size(),
replacement.has_nulls() ? cudf::mask_allocation_policy::ALWAYS
: cudf::mask_allocation_policy::NEVER,
stream,
mr);
auto output_view = output->mutable_view();
auto replace = replace_nulls<col_type, false>;
if (output_view.nullable()) replace = replace_nulls<col_type, true>;
auto device_in = cudf::column_device_view::create(input);
auto device_out = cudf::mutable_column_device_view::create(output_view);
auto device_replacement = cudf::column_device_view::create(replacement);
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
hipLaunchKernelGGL(( replace), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(),
*device_in, *device_replacement, *device_out, valid_count);
if (output_view.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template <typename col_type, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<col_type>())>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for the given type.");
}
};
template <>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
auto replace_first = replace_nulls_strings<0, false>;
auto replace_second = replace_nulls_strings<1, false>;
if (replacement.has_nulls()) {
replace_first = replace_nulls_strings<0, true>;
replace_second = replace_nulls_strings<1, true>;
}
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column(
cudf::data_type(cudf::type_id::INT32), input.size(), cudf::mask_state::UNALLOCATED, stream);
auto sizes_view = sizes->mutable_view();
auto device_in = cudf::column_device_view::create(input, stream);
auto device_replacement = cudf::column_device_view::create(replacement, stream);
rmm::device_buffer valid_bits =
cudf::detail::create_null_mask(input.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
// Call first pass kernel to get sizes in offsets
cudf::detail::grid_1d grid{input.size(), BLOCK_SIZE, 1};
hipLaunchKernelGGL(( replace_first), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(),
*device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
sizes_view.begin<cudf::size_type>(),
nullptr,
valid_count);
std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column(
sizes_view.begin<int32_t>(), sizes_view.end<int32_t>(), stream, mr);
auto offsets_view = offsets->mutable_view();
auto const bytes =
cudf::detail::get_value<int32_t>(offsets_view, offsets_view.size() - 1, stream);
// Allocate chars array and output null mask
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(input.size(), bytes, stream, mr);
auto output_chars_view = output_chars->mutable_view();
hipLaunchKernelGGL(( replace_second), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(),
*device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
offsets_view.begin<cudf::size_type>(),
output_chars_view.data<char>(),
valid_count);
return cudf::make_strings_column(input.size(),
std::move(offsets),
std::move(output_chars),
input.size() - valid_counter.value(stream),
std::move(valid_bits),
stream,
mr);
}
template <>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::dictionary_column_view dict_input(input);
cudf::dictionary_column_view dict_repl(replacement);
return cudf::dictionary::detail::replace_nulls(dict_input, dict_repl, stream, mr);
}
template <typename T>
struct replace_nulls_functor {
T* value_it;
replace_nulls_functor(T* _value_it) : value_it(_value_it) {}
__device__ T operator()(T input, bool is_valid) { return is_valid ? input : *value_it; }
};
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
struct replace_nulls_scalar_kernel_forwarder {
template <typename col_type,
typename std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
std::unique_ptr<cudf::column> output =
cudf::allocate_like(input, cudf::mask_allocation_policy::NEVER, mr);
auto output_view = output->mutable_view();
using ScalarType = cudf::scalar_type_t<col_type>;
auto s1 = static_cast<ScalarType const&>(replacement);
auto device_in = cudf::column_device_view::create(input);
auto func = replace_nulls_functor<col_type>{s1.data()};
thrust::transform(rmm::exec_policy(stream),
input.data<col_type>(),
input.data<col_type>() + input.size(),
cudf::detail::make_validity_iterator(*device_in),
output_view.data<col_type>(),
func);
return output;
}
template <typename col_type, std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for the given type.");
}
};
template <>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
cudf::strings_column_view input_s(input);
const cudf::string_scalar& repl = static_cast<const cudf::string_scalar&>(replacement);
return cudf::strings::detail::replace_nulls(input_s, repl, stream, mr);
}
template <>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::dictionary_column_view dict_input(input);
return cudf::dictionary::detail::replace_nulls(dict_input, replacement, stream, mr);
}
/**
* @brief Function used by replace_nulls policy
*/
std::unique_ptr<cudf::column> replace_nulls_policy_impl(cudf::column_view const& input,
cudf::replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto device_in = cudf::column_device_view::create(input);
auto index = thrust::make_counting_iterator<cudf::size_type>(0);
auto valid_it = cudf::detail::make_validity_iterator(*device_in);
auto in_begin = thrust::make_zip_iterator(thrust::make_tuple(index, valid_it));
rmm::device_uvector<cudf::size_type> gather_map(input.size(), stream);
auto gm_begin = thrust::make_zip_iterator(
thrust::make_tuple(gather_map.begin(), thrust::make_discard_iterator()));
auto func = cudf::detail::replace_policy_functor();
if (replace_policy == cudf::replace_policy::PRECEDING) {
thrust::inclusive_scan(
rmm::exec_policy(stream), in_begin, in_begin + input.size(), gm_begin, func);
} else {
auto in_rbegin = thrust::make_reverse_iterator(in_begin + input.size());
auto gm_rbegin = thrust::make_reverse_iterator(gm_begin + gather_map.size());
thrust::inclusive_scan(
rmm::exec_policy(stream), in_rbegin, in_rbegin + input.size(), gm_rbegin, func);
}
auto output = cudf::detail::gather(cudf::table_view({input}),
gather_map.begin(),
gather_map.end(),
cudf::out_of_bounds_policy::DONT_CHECK,
stream,
mr);
return std::move(output->release()[0]);
}
} // end anonymous namespace
namespace cudf {
namespace detail {
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch");
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); }
return cudf::type_dispatcher<dispatch_storage_type>(
input.type(), replace_nulls_column_kernel_forwarder{}, input, replacement, stream, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls() || !replacement.is_valid()) {
return std::make_unique<cudf::column>(input, stream, mr);
}
return cudf::type_dispatcher<dispatch_storage_type>(
input.type(), replace_nulls_scalar_kernel_forwarder{}, input, replacement, stream, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); }
return replace_nulls_policy_impl(input, replace_policy, stream, mr);
}
} // namespace detail
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr);
}
std::unique_ptr<cudf::column> replace_nulls(column_view const& input,
replace_policy const& replace_policy,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replace_policy, rmm::cuda_stream_default, mr);
}
} // namespace cudf
|
b86566bc96f9d0b733ab258e3d9774f795652e34.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/replace/nulls.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/replace.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/replace.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace { // anonymous
static constexpr int BLOCK_SIZE = 256;
template <int phase, bool replacement_has_nulls>
__global__ void replace_nulls_strings(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::bitmask_type* output_valid,
cudf::size_type* offsets,
char* chars,
cudf::size_type* valid_counter)
{
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
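// Grid-stride loop over rows. Phase 0 records each output string's byte size
// and builds the output validity mask via warp-wide ballots; phase 1 copies
// the chosen string's bytes into the chars buffer at the precomputed offset.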
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (replacement_has_nulls && !input_is_valid) {
output_is_valid = replacement.is_valid_nocheck(i);
}
cudf::string_view out;
if (input_is_valid) {
out = input.element<cudf::string_view>(i);
} else if (output_is_valid) {
out = replacement.element<cudf::string_view>(i);
}
bool nonzero_output = (input_is_valid || output_is_valid);
if (phase == 0) {
offsets[i] = nonzero_output ? out.size_bytes() : 0;
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(i)] = bitmask;
valid_sum += __popc(bitmask);
}
} else if (phase == 1) {
if (nonzero_output) std::memcpy(chars + offsets[i], out.data(), out.size_bytes());
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(valid_counter, block_valid_count); }
}
template <typename Type, bool replacement_has_nulls>
__global__ void replace_nulls(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view output,
cudf::size_type* output_valid_count)
{
cudf::size_type nrows = input.size();
cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
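// Grid-stride loop: copy the input value where it is valid, otherwise take the
// replacement; when the replacement column can itself contain nulls, the warp
// ballot below rebuilds the output validity mask and tallies the valid count.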
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (input_is_valid) {
output.data<Type>()[i] = input.element<Type>(i);
} else {
if (replacement_has_nulls) { output_is_valid = replacement.is_valid_nocheck(i); }
output.data<Type>()[i] = replacement.element<Type>(i);
}
/* output valid counts calculations*/
if (replacement_has_nulls) {
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(i), bitmask);
valid_sum += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if (replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count =
cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
}
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
struct replace_nulls_column_kernel_forwarder {
template <typename col_type, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<col_type>())>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::size_type nrows = input.size();
cudf::detail::grid_1d grid{nrows, BLOCK_SIZE};
auto output =
cudf::detail::allocate_like(input,
input.size(),
replacement.has_nulls() ? cudf::mask_allocation_policy::ALWAYS
: cudf::mask_allocation_policy::NEVER,
stream,
mr);
auto output_view = output->mutable_view();
auto replace = replace_nulls<col_type, false>;
if (output_view.nullable()) replace = replace_nulls<col_type, true>;
auto device_in = cudf::column_device_view::create(input);
auto device_out = cudf::mutable_column_device_view::create(output_view);
auto device_replacement = cudf::column_device_view::create(replacement);
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
replace<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(
*device_in, *device_replacement, *device_out, valid_count);
if (output_view.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template <typename col_type, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<col_type>())>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for the given type.");
}
};
template <>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
auto replace_first = replace_nulls_strings<0, false>;
auto replace_second = replace_nulls_strings<1, false>;
if (replacement.has_nulls()) {
replace_first = replace_nulls_strings<0, true>;
replace_second = replace_nulls_strings<1, true>;
}
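// Two-pass construction: the first kernel writes per-row byte sizes and the
// output null mask, offsets are built from those sizes, and the second kernel
// copies the string bytes into the chars buffer.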
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column(
cudf::data_type(cudf::type_id::INT32), input.size(), cudf::mask_state::UNALLOCATED, stream);
auto sizes_view = sizes->mutable_view();
auto device_in = cudf::column_device_view::create(input, stream);
auto device_replacement = cudf::column_device_view::create(replacement, stream);
rmm::device_buffer valid_bits =
cudf::detail::create_null_mask(input.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
// Call first pass kernel to get sizes in offsets
cudf::detail::grid_1d grid{input.size(), BLOCK_SIZE, 1};
replace_first<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(
*device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
sizes_view.begin<cudf::size_type>(),
nullptr,
valid_count);
std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column(
sizes_view.begin<int32_t>(), sizes_view.end<int32_t>(), stream, mr);
auto offsets_view = offsets->mutable_view();
auto const bytes =
cudf::detail::get_value<int32_t>(offsets_view, offsets_view.size() - 1, stream);
// Allocate chars array and output null mask
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(input.size(), bytes, stream, mr);
auto output_chars_view = output_chars->mutable_view();
replace_second<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(
*device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
offsets_view.begin<cudf::size_type>(),
output_chars_view.data<char>(),
valid_count);
return cudf::make_strings_column(input.size(),
std::move(offsets),
std::move(output_chars),
input.size() - valid_counter.value(stream),
std::move(valid_bits),
stream,
mr);
}
template <>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::dictionary_column_view dict_input(input);
cudf::dictionary_column_view dict_repl(replacement);
return cudf::dictionary::detail::replace_nulls(dict_input, dict_repl, stream, mr);
}
template <typename T>
struct replace_nulls_functor {
T* value_it;
replace_nulls_functor(T* _value_it) : value_it(_value_it) {}
__device__ T operator()(T input, bool is_valid) { return is_valid ? input : *value_it; }
};
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
struct replace_nulls_scalar_kernel_forwarder {
template <typename col_type,
typename std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
std::unique_ptr<cudf::column> output =
cudf::allocate_like(input, cudf::mask_allocation_policy::NEVER, mr);
auto output_view = output->mutable_view();
using ScalarType = cudf::scalar_type_t<col_type>;
auto s1 = static_cast<ScalarType const&>(replacement);
auto device_in = cudf::column_device_view::create(input);
auto func = replace_nulls_functor<col_type>{s1.data()};
thrust::transform(rmm::exec_policy(stream),
input.data<col_type>(),
input.data<col_type>() + input.size(),
cudf::detail::make_validity_iterator(*device_in),
output_view.data<col_type>(),
func);
return output;
}
template <typename col_type, std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for the given type.");
}
};
template <>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
cudf::strings_column_view input_s(input);
const cudf::string_scalar& repl = static_cast<const cudf::string_scalar&>(replacement);
return cudf::strings::detail::replace_nulls(input_s, repl, stream, mr);
}
template <>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::dictionary_column_view dict_input(input);
return cudf::dictionary::detail::replace_nulls(dict_input, replacement, stream, mr);
}
/**
* @brief Function used by replace_nulls policy
*/
std::unique_ptr<cudf::column> replace_nulls_policy_impl(cudf::column_view const& input,
cudf::replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto device_in = cudf::column_device_view::create(input);
auto index = thrust::make_counting_iterator<cudf::size_type>(0);
auto valid_it = cudf::detail::make_validity_iterator(*device_in);
auto in_begin = thrust::make_zip_iterator(thrust::make_tuple(index, valid_it));
rmm::device_uvector<cudf::size_type> gather_map(input.size(), stream);
auto gm_begin = thrust::make_zip_iterator(
thrust::make_tuple(gather_map.begin(), thrust::make_discard_iterator()));
auto func = cudf::detail::replace_policy_functor();
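// The inclusive scan below, driven by replace_policy_functor, carries the row
// index of the nearest valid element along the column (forward for PRECEDING,
// backward for FOLLOWING), so gather_map ends up holding, for every row, the
// row to copy its value from.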
if (replace_policy == cudf::replace_policy::PRECEDING) {
thrust::inclusive_scan(
rmm::exec_policy(stream), in_begin, in_begin + input.size(), gm_begin, func);
} else {
auto in_rbegin = thrust::make_reverse_iterator(in_begin + input.size());
auto gm_rbegin = thrust::make_reverse_iterator(gm_begin + gather_map.size());
thrust::inclusive_scan(
rmm::exec_policy(stream), in_rbegin, in_rbegin + input.size(), gm_rbegin, func);
}
auto output = cudf::detail::gather(cudf::table_view({input}),
gather_map.begin(),
gather_map.end(),
cudf::out_of_bounds_policy::DONT_CHECK,
stream,
mr);
return std::move(output->release()[0]);
}
} // end anonymous namespace
namespace cudf {
namespace detail {
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch");
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); }
return cudf::type_dispatcher<dispatch_storage_type>(
input.type(), replace_nulls_column_kernel_forwarder{}, input, replacement, stream, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls() || !replacement.is_valid()) {
return std::make_unique<cudf::column>(input, stream, mr);
}
return cudf::type_dispatcher<dispatch_storage_type>(
input.type(), replace_nulls_scalar_kernel_forwarder{}, input, replacement, stream, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); }
return replace_nulls_policy_impl(input, replace_policy, stream, mr);
}
} // namespace detail
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr);
}
std::unique_ptr<cudf::column> replace_nulls(column_view const& input,
replace_policy const& replace_policy,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replace_policy, rmm::cuda_stream_default, mr);
}
} // namespace cudf
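// Illustrative usage sketch (an assumption, not part of this file): filling
// nulls in an INT32 column with a zero scalar through the public API defined
// above; `col` stands for a previously built cudf column.
//
//   cudf::numeric_scalar<int32_t> repl{0};
//   std::unique_ptr<cudf::column> filled =
//     cudf::replace_nulls(col->view(), repl, rmm::mr::get_current_device_resource());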
|
7d33d6b6396995c3053dac235c22e46e1f331fbe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
#define TB 128
#define DISP_MAX 256
__global__ void remove_white(float *x, float *y, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
if (x[id] == 255) {
y[id] = 0;
}
}
}
|
7d33d6b6396995c3053dac235c22e46e1f331fbe.cu
|
#include "includes.h"
extern "C" {
}
#define TB 128
#define DISP_MAX 256
__global__ void remove_white(float *x, float *y, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
if (x[id] == 255) {
y[id] = 0;
}
}
}
|
89662feec97982c8b5446edfaa00d060e09ea903.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "common/book.h"
#include "time.h"
#include "stdlib.h"
#define N (33 * 1024)
#define NUM_THREADS 128
#define NUM_BLOCKS 128
__global__ void add(int *a, int *b, int *c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x; // flatten (block, thread) into a global linear index
while (tid < N) {
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x; // stride over the total number of threads in the grid
}
}
int random(int min, int max) {
return min + rand() % (max - min + 1);
}
int main() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate vectors on device
HANDLE_ERROR(hipMalloc((void**) &dev_a, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**) &dev_b, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**) &dev_c, N * sizeof(int)));
// fill host vectors randomly so that later we copy them to device
srand(time(0)); // use current time as seed
for (int i = 0; i < N; ++i) {
a[i] = random(1, 100);
b[i] = random(1, 100);
}
// copy host vectors to device
HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice));
// launch NUM_BLOCKS blocks, each spawning NUM_THREADS threads
// this can be imagined as a 2D grid dimension where each block is 1D
// the right setting depends on the hardware used and on your data
// note that there are hardware limits on both counts, which is why we
// benefit from combining blocks and threads
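// because N (33 * 1024) exceeds NUM_BLOCKS * NUM_THREADS (128 * 128), the
// grid-stride loop in add() lets every thread process several elements,
// striding by blockDim.x * gridDim.x on each pass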
hipLaunchKernelGGL(( add), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dev_a, dev_b, dev_c);
// copy result back to c
HANDLE_ERROR(hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost));
bool success = true;
for (int i = 0; i < N && success; ++i) {
if (a[i] + b[i] != c[i])
success = false;
}
if (!success)
printf("something went wrong!\n");
else
printf("worked!\n");
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
|
89662feec97982c8b5446edfaa00d060e09ea903.cu
|
#include "stdio.h"
#include "common/book.h"
#include "time.h"
#include "stdlib.h"
#define N (33 * 1024)
#define NUM_THREADS 128
#define NUM_BLOCKS 128
__global__ void add(int *a, int *b, int *c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x; // flatten (block, thread) into a global linear index
while (tid < N) {
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x; // stride over the total number of threads in the grid
}
}
int random(int min, int max) {
return min + rand() % (max - min + 1);
}
int main() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate vectors on device
HANDLE_ERROR(cudaMalloc((void**) &dev_a, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**) &dev_b, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**) &dev_c, N * sizeof(int)));
// fill host vectors randomly so that later we copy them to device
srand(time(0)); // use current time as seed
for (int i = 0; i < N; ++i) {
a[i] = random(1, 100);
b[i] = random(1, 100);
}
// copy host vectors to device
HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
// launch NUM_BLOCKS blocks, each spawning NUM_THREADS threads
// this can be imagined as a 2D grid dimension where each block is 1D
// the right setting depends on the hardware used and on your data
// note that there are hardware limits on both counts, which is why we
// benefit from combining blocks and threads
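// because N (33 * 1024) exceeds NUM_BLOCKS * NUM_THREADS (128 * 128), the
// grid-stride loop in add() lets every thread process several elements,
// striding by blockDim.x * gridDim.x on each pass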
add<<<NUM_BLOCKS, NUM_THREADS>>>(dev_a, dev_b, dev_c);
// copy result back to c
HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));
bool success = true;
for (int i = 0; i < N && success; ++i) {
if (a[i] + b[i] != c[i])
success = false;
}
if (!success)
printf("something went wrong!\n");
else
printf("worked!\n");
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
11047f578f44b5db8bb6080c22b99106f697cbe5.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 by Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
*/
#include <dmlc/omp.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <cmath>
#include <memory>
#include <vector>
#include "../common/span.h"
#include "../common/transform.h"
#include "../common/common.h"
#include "../common/host_device_vector.h"
#include "./regression_loss.h"
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(regression_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct RegLossParam : public dmlc::Parameter<RegLossParam> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParam) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObj : public ObjFunction {
protected:
HostDeviceVector<int> label_correct_;
public:
RegLossObj() = default;
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.InitAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair>* out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "labels are not correctly provided"
<< "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size();
size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, preds.Size());
label_correct_.Resize(devices.IsEmpty() ? 1 : devices.Size());
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
auto scale_pos_weight = param_.scale_pos_weight;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = Loss::PredTransform(_preds[_idx]);
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float label = _labels[_idx];
if (label == 1.0f) {
w *= scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, devices).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
}, common::Range{0, static_cast<int64_t>(io_preds->Size())},
GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, io_preds->Size()))
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
protected:
RegLossParam param_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParam);
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, "reg:squarederror")
.describe("Regression with squared error.")
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogError, "reg:squaredlogerror")
.describe("Regression with root mean squared logarithmic error.")
.set_body([]() { return new RegLossObj<SquaredLogError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, "reg:logistic")
.describe("Logistic regression for probability regression task.")
.set_body([]() { return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, "binary:logistic")
.describe("Logistic regression for binary classification task.")
.set_body([]() { return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, "binary:logitraw")
.describe("Logistic regression for classification, output score "
"before logistic transformation.")
.set_body([]() { return new RegLossObj<LogisticRaw>(); });
// Deprecated functions
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear")
.describe("Regression with squared error.")
.set_body([]() {
LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror.";
return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(GPULinearRegression, "gpu:reg:linear")
.describe("Deprecated. Linear regression (computed on GPU).")
.set_body([]() {
LOG(WARNING) << "gpu:reg:linear is now deprecated, use reg:linear instead.";
return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(GPULogisticRegression, "gpu:reg:logistic")
.describe("Deprecated. Logistic regression for probability regression task (computed on GPU).")
.set_body([]() {
LOG(WARNING) << "gpu:reg:logistic is now deprecated, use reg:logistic instead.";
return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(GPULogisticClassification, "gpu:binary:logistic")
.describe("Deprecated. Logistic regression for binary classification task (computed on GPU).")
.set_body([]() {
LOG(WARNING) << "gpu:binary:logistic is now deprecated, use binary:logistic instead.";
return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(GPULogisticRaw, "gpu:binary:logitraw")
.describe("Deprecated. Logistic regression for classification, output score "
"before logistic transformation (computed on GPU)")
.set_body([]() {
LOG(WARNING) << "gpu:binary:logitraw is now deprecated, use binary:logitraw instead.";
return new RegLossObj<LogisticRaw>(); });
// End deprecated
// declare parameter
struct PoissonRegressionParam : public dmlc::Parameter<PoissonRegressionParam> {
float max_delta_step;
DMLC_DECLARE_PARAMETER(PoissonRegressionParam) {
DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f)
.describe("Maximum delta step we allow each weight estimation to be." \
" This parameter is required for possion regression.");
}
};
// poisson regression for count
class PoissonRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.InitAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, preds.Size());
label_correct_.Resize(devices.IsEmpty() ? 1 : devices.Size());
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, devices).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "PoissonRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, io_preds->Size()))
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
private:
PoissonRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(PoissonRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
.describe("Possion regression for count data.")
.set_body([]() { return new PoissonRegression(); });
// cox regression for survival data (negative values mean they are censored)
class CoxRegression : public ObjFunction {
public:
void Configure(
const std::vector<std::pair<std::string, std::string> > &args) override {}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
const std::vector<size_t> &label_order = info.LabelAbsSort();
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
// pre-compute a sum
double exp_p_sum = 0; // we use double because we might need the precision with large datasets
for (omp_ulong i = 0; i < ndata; ++i) {
exp_p_sum += ::exp(preds_h[label_order[i]]);
}
// start calculating grad and hess
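// r_k accumulates 1/denominator and s_k accumulates 1/denominator^2 over the
// events seen so far; exp_p_sum is the partial-likelihood (risk-set)
// denominator, which shrinks as we move forward in sorted time.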
const auto& labels = info.labels_.HostVector();
double r_k = 0;
double s_k = 0;
double last_exp_p = 0.0;
double last_abs_y = 0.0;
double accumulated_sum = 0;
for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*)
const size_t ind = label_order[i];
const double p = preds_h[ind];
const double exp_p = ::exp(p);
const double w = info.GetWeight(ind);
const double y = labels[ind];
const double abs_y = std::abs(y);
// only update the denominator after we move forward in time (labels are sorted)
// this is Breslow's method for ties
accumulated_sum += last_exp_p;
if (last_abs_y < abs_y) {
exp_p_sum -= accumulated_sum;
accumulated_sum = 0;
} else {
CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " <<
"MetaInfo::LabelArgsort failed!";
}
if (y > 0) {
r_k += 1.0/exp_p_sum;
s_k += 1.0/(exp_p_sum*exp_p_sum);
}
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
#pragma omp parallel for schedule(static)
for (long j = 0; j < ndata; ++j) { // NOLINT(*)
preds[j] = ::exp(preds[j]);
}
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
};
// register the objective function
XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
.describe("Cox regression for censored survival data (negative labels are considered censored).")
.set_body([]() { return new CoxRegression(); });
// gamma regression
class GammaRegression : public ObjFunction {
public:
void Configure(
const std::vector<std::pair<std::string, std::string> > &args) override {}
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, ndata);
out_gpair->Resize(ndata);
label_correct_.Resize(devices.IsEmpty() ? 1 : devices.Size());
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, devices).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "GammaRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, io_preds->Size()))
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
private:
HostDeviceVector<int> label_correct_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma")
.describe("Gamma regression for severity data.")
.set_body([]() { return new GammaRegression(); });
// declare parameter
struct TweedieRegressionParam : public dmlc::Parameter<TweedieRegressionParam> {
float tweedie_variance_power;
DMLC_DECLARE_PARAMETER(TweedieRegressionParam) {
DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f)
.describe("Tweedie variance power. Must be between in range [1, 2).");
}
};
// tweedie regression
class TweedieRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.InitAllowUnknown(args);
std::ostringstream os;
os << "tweedie-nloglik@" << param_.tweedie_variance_power;
metric_ = os.str();
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, preds.Size());
label_correct_.Resize(devices.IsEmpty() ? 1 : devices.Size());
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
const float rho = param_.tweedie_variance_power;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
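// grad and hess are the first and second derivatives w.r.t. the margin p of
// the loss term -y * expf((1 - rho) * p) / (1 - rho) + expf((2 - rho) * p) / (2 - rho)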
bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p);
bst_float hess =
-y * (1 - rho) * \
::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, devices)
.Eval(&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "TweedieRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, io_preds->Size()))
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
private:
std::string metric_;
TweedieRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(TweedieRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
.describe("Tweedie regression for insurance data.")
.set_body([]() { return new TweedieRegression(); });
} // namespace obj
} // namespace xgboost
|
11047f578f44b5db8bb6080c22b99106f697cbe5.cu
|
/*!
* Copyright 2015-2019 by Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
*/
#include <dmlc/omp.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <cmath>
#include <memory>
#include <vector>
#include "../common/span.h"
#include "../common/transform.h"
#include "../common/common.h"
#include "../common/host_device_vector.h"
#include "./regression_loss.h"
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(regression_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct RegLossParam : public dmlc::Parameter<RegLossParam> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParam) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObj : public ObjFunction {
protected:
HostDeviceVector<int> label_correct_;
public:
RegLossObj() = default;
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.InitAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair>* out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "labels are not correctly provided"
<< "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size();
size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, preds.Size());
label_correct_.Resize(devices.IsEmpty() ? 1 : devices.Size());
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
auto scale_pos_weight = param_.scale_pos_weight;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = Loss::PredTransform(_preds[_idx]);
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float label = _labels[_idx];
if (label == 1.0f) {
w *= scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, devices).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
}, common::Range{0, static_cast<int64_t>(io_preds->Size())},
GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, io_preds->Size()))
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
protected:
RegLossParam param_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParam);
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, "reg:squarederror")
.describe("Regression with squared error.")
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogError, "reg:squaredlogerror")
.describe("Regression with root mean squared logarithmic error.")
.set_body([]() { return new RegLossObj<SquaredLogError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, "reg:logistic")
.describe("Logistic regression for probability regression task.")
.set_body([]() { return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, "binary:logistic")
.describe("Logistic regression for binary classification task.")
.set_body([]() { return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, "binary:logitraw")
.describe("Logistic regression for classification, output score "
"before logistic transformation.")
.set_body([]() { return new RegLossObj<LogisticRaw>(); });
// Deprecated functions
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear")
.describe("Regression with squared error.")
.set_body([]() {
LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror.";
return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(GPULinearRegression, "gpu:reg:linear")
.describe("Deprecated. Linear regression (computed on GPU).")
.set_body([]() {
LOG(WARNING) << "gpu:reg:linear is now deprecated, use reg:linear instead.";
return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(GPULogisticRegression, "gpu:reg:logistic")
.describe("Deprecated. Logistic regression for probability regression task (computed on GPU).")
.set_body([]() {
LOG(WARNING) << "gpu:reg:logistic is now deprecated, use reg:logistic instead.";
return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(GPULogisticClassification, "gpu:binary:logistic")
.describe("Deprecated. Logistic regression for binary classification task (computed on GPU).")
.set_body([]() {
LOG(WARNING) << "gpu:binary:logistic is now deprecated, use binary:logistic instead.";
return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(GPULogisticRaw, "gpu:binary:logitraw")
.describe("Deprecated. Logistic regression for classification, output score "
"before logistic transformation (computed on GPU)")
.set_body([]() {
LOG(WARNING) << "gpu:binary:logitraw is now deprecated, use binary:logitraw instead.";
return new RegLossObj<LogisticRaw>(); });
// End deprecated
// declare parameter
struct PoissonRegressionParam : public dmlc::Parameter<PoissonRegressionParam> {
float max_delta_step;
DMLC_DECLARE_PARAMETER(PoissonRegressionParam) {
DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f)
.describe("Maximum delta step we allow each weight estimation to be." \
" This parameter is required for possion regression.");
}
};
// poisson regression for count
class PoissonRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.InitAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, preds.Size());
label_correct_.Resize(devices.IsEmpty() ? 1 : devices.Size());
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, devices).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "PoissonRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, io_preds->Size()))
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
private:
PoissonRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(PoissonRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
.describe("Possion regression for count data.")
.set_body([]() { return new PoissonRegression(); });
// cox regression for survival data (negative values mean they are censored)
class CoxRegression : public ObjFunction {
public:
void Configure(
const std::vector<std::pair<std::string, std::string> > &args) override {}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
const std::vector<size_t> &label_order = info.LabelAbsSort();
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
// pre-compute a sum
double exp_p_sum = 0; // we use double because we might need the precision with large datasets
for (omp_ulong i = 0; i < ndata; ++i) {
exp_p_sum += std::exp(preds_h[label_order[i]]);
}
// start calculating grad and hess
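// r_k accumulates 1/denominator and s_k accumulates 1/denominator^2 over the
// events seen so far; exp_p_sum is the partial-likelihood (risk-set)
// denominator, which shrinks as we move forward in sorted time.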
const auto& labels = info.labels_.HostVector();
double r_k = 0;
double s_k = 0;
double last_exp_p = 0.0;
double last_abs_y = 0.0;
double accumulated_sum = 0;
for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*)
const size_t ind = label_order[i];
const double p = preds_h[ind];
const double exp_p = std::exp(p);
const double w = info.GetWeight(ind);
const double y = labels[ind];
const double abs_y = std::abs(y);
// only update the denominator after we move forward in time (labels are sorted)
// this is Breslow's method for ties
accumulated_sum += last_exp_p;
if (last_abs_y < abs_y) {
exp_p_sum -= accumulated_sum;
accumulated_sum = 0;
} else {
CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " <<
"MetaInfo::LabelArgsort failed!";
}
if (y > 0) {
r_k += 1.0/exp_p_sum;
s_k += 1.0/(exp_p_sum*exp_p_sum);
}
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
#pragma omp parallel for schedule(static)
for (long j = 0; j < ndata; ++j) { // NOLINT(*)
preds[j] = std::exp(preds[j]);
}
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
};
// register the objective function
XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
.describe("Cox regression for censored survival data (negative labels are considered censored).")
.set_body([]() { return new CoxRegression(); });
// gamma regression
class GammaRegression : public ObjFunction {
public:
void Configure(
const std::vector<std::pair<std::string, std::string> > &args) override {}
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, ndata);
out_gpair->Resize(ndata);
label_correct_.Resize(devices.IsEmpty() ? 1 : devices.Size());
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, devices).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "GammaRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, io_preds->Size()))
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
private:
HostDeviceVector<int> label_correct_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma")
.describe("Gamma regression for severity data.")
.set_body([]() { return new GammaRegression(); });
// declare parameter
struct TweedieRegressionParam : public dmlc::Parameter<TweedieRegressionParam> {
float tweedie_variance_power;
DMLC_DECLARE_PARAMETER(TweedieRegressionParam) {
DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f)
.describe("Tweedie variance power. Must be between in range [1, 2).");
}
};
// tweedie regression
class TweedieRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.InitAllowUnknown(args);
std::ostringstream os;
os << "tweedie-nloglik@" << param_.tweedie_variance_power;
metric_ = os.str();
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info,
int iter,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, preds.Size());
label_correct_.Resize(devices.IsEmpty() ? 1 : devices.Size());
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
const float rho = param_.tweedie_variance_power;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
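// grad and hess are the first and second derivatives w.r.t. the margin p of
// the loss term -y * expf((1 - rho) * p) / (1 - rho) + expf((2 - rho) * p) / (2 - rho)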
bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p);
bst_float hess =
-y * (1 - rho) * \
std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, devices)
.Eval(&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "TweedieRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, io_preds->Size()))
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
private:
std::string metric_;
TweedieRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(TweedieRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
.describe("Tweedie regression for insurance data.")
.set_body([]() { return new TweedieRegression(); });
} // namespace obj
} // namespace xgboost
|
df30015301dcd97b36c1b376638cb22904c6ee3e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "task_correlation.cuh"
template<typename T>
__device__ void d_blockReduce( T* arr, int numElements2 ) {
for( int stride = ( numElements2 >> 1 ); stride; stride >>= 1 ) {
if( threadIdx.x < stride ) {
int ui = threadIdx.x + stride;
if( ui < blockDim.x ) {
arr[threadIdx.x] += arr[ui];
}
}
__syncthreads();
}
}
template<typename T>
__global__ void d_globalCorrelation( DeviceMemory<T>* mem, T divSumA, T divSumB, T* image ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx >= mem->fframeW ) return;
T assq = 0;
T bssq = 0;
T q = 0;
for( int i = 0, pxId = idx; i < mem->frameH; i++ ) {
if(mem->mask[pxId]) {
T va = ( mem->differenceImage[pxId] - divSumA ); //wI[pxId]
assq += va * va;
T vb = ( image[pxId] - divSumB );
bssq += vb * vb;
q += va * vb;
}
pxId += mem->fframeW; // advance to the next row whether or not the pixel is masked
}
mem->qcAssq[idx] = assq;
mem->qcBssq[idx] = bssq;
mem->qcq[idx] = q;
}
template<typename T>
__global__ void d_clearImage( bool* mask, T* image, T* dest, int N ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx >= N ) return;
T iv[2] = { 0, image[idx] };
dest[idx] = iv[mask[idx]];
}
template<typename T>
__global__ void d_swapWIDiff( DeviceMemory<T>* mem, int N ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx >= N ) return;
if(!mem->mask[idx]) return;
//differenceImage = image - wI;
//wI = image - differenceImage
mem->differenceImage[idx] = mem->image[idx] - mem->differenceImage[idx];
}
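// Host wrapper for the whole-mosaic correlation: d_clearImage builds a masked copy
// of the image, d_swapWIDiff turns differenceImage into wI in place, thrust
// reductions give the two mean levels (sums divided by the masked-pixel count),
// d_globalCorrelation accumulates per-column sums of squares and cross terms, and
// thrust reduces those into the Pearson correlation written to *corr. The final
// d_swapWIDiff undoes the in-place swap.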
template<typename T>
void hd_globalCorrelation( DeviceMemory<T>& d_Ptr, T* corr ) {
int numBlocks;
int numPixel = d_Ptr.frameH * d_Ptr.fframeW;
numBlocks = ( numPixel + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( d_clearImage<T>), dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, d_Ptr.mask, d_Ptr.image, d_Ptr.qcImg, numPixel );
hipLaunchKernelGGL(( d_swapWIDiff<T>), dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, d_Ptr.d_mem, numPixel );
thrust::device_ptr<T> qcImg = thrust::device_pointer_cast( d_Ptr.image );
thrust::device_ptr<T> qcWi = thrust::device_pointer_cast( d_Ptr.differenceImage ); //changed from d_Ptr.wI
thrust::device_ptr<bool> qcMask = thrust::device_pointer_cast( d_Ptr.mask );
hipDeviceSynchronize();
T divSumA = thrust::reduce( qcImg, qcImg + numPixel );
T divSumB = thrust::reduce( qcWi, qcWi + numPixel );
int n = thrust::count( qcMask, qcMask + numPixel, true );
divSumA /= n;
divSumB /= n;
numBlocks = ( d_Ptr.fframeW + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( d_globalCorrelation<T>) , dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, d_Ptr.d_mem, divSumA, divSumB, d_Ptr.qcImg );
thrust::device_ptr<T> dpAssq = thrust::device_pointer_cast( d_Ptr.qcAssq );
thrust::device_ptr<T> dpBssq = thrust::device_pointer_cast( d_Ptr.qcBssq );
thrust::device_ptr<T> dpQcq = thrust::device_pointer_cast( d_Ptr.qcq );
T assq = thrust::reduce( dpAssq, dpAssq + d_Ptr.fframeW );
T bssq = thrust::reduce( dpBssq, dpBssq + d_Ptr.fframeW );
T q = thrust::reduce( dpQcq, dpQcq + d_Ptr.fframeW );
*corr = q / sqrt( assq * bssq );
numBlocks = ( numPixel + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK; // restore the full-image grid before undoing the swap
hipLaunchKernelGGL(( d_swapWIDiff<T>), dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, d_Ptr.d_mem, numPixel );
}
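// Per-frame correlation: one block per frame, one thread per column of that frame.
// Pass 1 accumulates the masked per-thread sums of differenceImage and image plus
// the mask count into shared memory and tree-reduces them to get the two means;
// pass 2 accumulates the squared deviations and the cross term the same way and
// writes the resulting Pearson correlation for this frame to mem->corr[blockIdx.x].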
template<typename T>
__global__ void d_frameCorrelation( DeviceMemory<T>* mem ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ char s_mem[];
int idpx = idx;
T* buffA = reinterpret_cast<T*>( &s_mem[sizeof(T) * blockDim.x * 0] );
T* buffB = reinterpret_cast<T*>( &s_mem[sizeof(T) * blockDim.x * 1] );
int* buffC = reinterpret_cast<int*>( &s_mem[sizeof(T) * blockDim.x * 2] );
T* tbuffC = reinterpret_cast<T*>( buffC );
T blob[2]; blob[0] = 0;
buffA[threadIdx.x] = 0;
buffB[threadIdx.x] = 0;
buffC[threadIdx.x] = 0;
for( int i = 0; i < mem->frameH; i++ ) {
int used = mem->mask[idpx];
buffC[threadIdx.x] += used;
blob[1] = mem->image[idpx];
buffB[threadIdx.x] += blob[used];
blob[1] = mem->differenceImage[idpx]; //changed from wI
buffA[threadIdx.x] += blob[used];
idpx += mem->fframeW;
}
::d_blockReduce<T>( buffA, mem->groupSize2 );
::d_blockReduce<T>( buffB, mem->groupSize2 );
::d_blockReduce<int>( buffC, mem->groupSize2 );
//buffA is actually differenceImage not wI
//differenceImage = image - wI;
//sum(wI) = sum(image) - sum(differenceImage)
T divSumA = buffA[0] / static_cast<T>( buffC[0] );
T divSumB = buffB[0] / static_cast<T>( buffC[0] );
T assq = 0;
T bssq = 0;
T q = 0;
blob[1] = 1.0;
idpx = idx;
for( int i = 0; i < mem->frameH; i++ ) {
T va = ( mem->differenceImage[idpx] - divSumA ) * blob[mem->mask[idpx]];
assq += va * va;
T vb = ( mem->image[idpx] - divSumB ) * blob[mem->mask[idpx]];
bssq += vb * vb;
q += va * vb;
idpx += mem->fframeW;
}
buffA[threadIdx.x] = assq;
buffB[threadIdx.x] = bssq;
tbuffC[threadIdx.x] = q;
__syncthreads();
::d_blockReduce<T>( buffA, mem->groupSize2 );
::d_blockReduce<T>( buffB, mem->groupSize2 );
::d_blockReduce<T>( tbuffC, mem->groupSize2 );
mem->corr[blockIdx.x] = tbuffC[0] / sqrt( buffA[0] * buffB[0] );
}
template<typename T>
void hd_frameCorrelation( DeviceMemory<T>& d_Ptr ) {
int numPixel = d_Ptr.frameH * d_Ptr.fframeW;
int numBlocks = (numPixel + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( d_swapWIDiff<T>), dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, d_Ptr.d_mem, numPixel );
hipLaunchKernelGGL(( d_frameCorrelation), dim3(d_Ptr.nFrames), dim3(d_Ptr.frameW), d_Ptr.frameW * sizeof( T ) * 3 , 0, d_Ptr.d_mem );
hipLaunchKernelGGL(( d_swapWIDiff<T>), dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, d_Ptr.d_mem, numPixel );
}
template void hd_globalCorrelation( DeviceMemory<float>& d_Ptr, float* corr );
template void hd_globalCorrelation( DeviceMemory<double>& d_Ptr, double* corr );
template void hd_frameCorrelation( DeviceMemory<float>& mem );
template void hd_frameCorrelation( DeviceMemory<double>& mem );
|
df30015301dcd97b36c1b376638cb22904c6ee3e.cu
|
#include "task_correlation.cuh"
template<typename T>
__device__ void d_blockReduce( T* arr, int numElements2 ) {
for( int stride = ( numElements2 >> 1 ); stride; stride >>= 1 ) {
if( threadIdx.x < stride ) {
int ui = threadIdx.x + stride;
if( ui < blockDim.x ) {
arr[threadIdx.x] += arr[ui];
}
}
__syncthreads();
}
}
template<typename T>
__global__ void d_globalCorrelation( DeviceMemory<T>* mem, T divSumA, T divSumB, T* image ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx >= mem->fframeW ) return;
T assq = 0;
T bssq = 0;
T q = 0;
for( int i = 0, pxId = idx; i < mem->frameH; i++ ) {
if(mem->mask[pxId]) {
T va = ( mem->differenceImage[pxId] - divSumA ); //wI[pxId]
assq += va * va;
T vb = ( image[pxId] - divSumB );
bssq += vb * vb;
q += va * vb;
}
pxId += mem->fframeW; // advance to the next row whether or not the pixel is masked
}
mem->qcAssq[idx] = assq;
mem->qcBssq[idx] = bssq;
mem->qcq[idx] = q;
}
template<typename T>
__global__ void d_clearImage( bool* mask, T* image, T* dest, int N ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx >= N ) return;
T iv[2] = { 0, image[idx] };
dest[idx] = iv[mask[idx]];
}
template<typename T>
__global__ void d_swapWIDiff( DeviceMemory<T>* mem, int N ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx >= N ) return;
if(!mem->mask[idx]) return;
//differenceImage = image - wI;
//wI = image - differenceImage
mem->differenceImage[idx] = mem->image[idx] - mem->differenceImage[idx];
}
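// Host wrapper for the whole-mosaic correlation: d_clearImage builds a masked copy
// of the image, d_swapWIDiff turns differenceImage into wI in place, thrust
// reductions give the two mean levels (sums divided by the masked-pixel count),
// d_globalCorrelation accumulates per-column sums of squares and cross terms, and
// thrust reduces those into the Pearson correlation written to *corr. The final
// d_swapWIDiff undoes the in-place swap.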
template<typename T>
void hd_globalCorrelation( DeviceMemory<T>& d_Ptr, T* corr ) {
int numBlocks;
int numPixel = d_Ptr.frameH * d_Ptr.fframeW;
numBlocks = ( numPixel + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
d_clearImage<T><<< numBlocks, THREADS_PER_BLOCK >>>( d_Ptr.mask, d_Ptr.image, d_Ptr.qcImg, numPixel );
d_swapWIDiff<T><<< numBlocks, THREADS_PER_BLOCK >>>( d_Ptr.d_mem, numPixel );
thrust::device_ptr<T> qcImg = thrust::device_pointer_cast( d_Ptr.image );
thrust::device_ptr<T> qcWi = thrust::device_pointer_cast( d_Ptr.differenceImage ); //changed from d_Ptr.wI
thrust::device_ptr<bool> qcMask = thrust::device_pointer_cast( d_Ptr.mask );
cudaDeviceSynchronize();
T divSumA = thrust::reduce( qcImg, qcImg + numPixel );
T divSumB = thrust::reduce( qcWi, qcWi + numPixel );
int n = thrust::count( qcMask, qcMask + numPixel, true );
divSumA /= n;
divSumB /= n;
numBlocks = ( d_Ptr.fframeW + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
d_globalCorrelation<T> <<< numBlocks, THREADS_PER_BLOCK >>>( d_Ptr.d_mem, divSumA, divSumB, d_Ptr.qcImg );
thrust::device_ptr<T> dpAssq = thrust::device_pointer_cast( d_Ptr.qcAssq );
thrust::device_ptr<T> dpBssq = thrust::device_pointer_cast( d_Ptr.qcBssq );
thrust::device_ptr<T> dpQcq = thrust::device_pointer_cast( d_Ptr.qcq );
T assq = thrust::reduce( dpAssq, dpAssq + d_Ptr.fframeW );
T bssq = thrust::reduce( dpBssq, dpBssq + d_Ptr.fframeW );
T q = thrust::reduce( dpQcq, dpQcq + d_Ptr.fframeW );
*corr = q / sqrt( assq * bssq );
numBlocks = ( numPixel + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK; // restore the full-image grid before undoing the swap
d_swapWIDiff<T><<< numBlocks, THREADS_PER_BLOCK >>>( d_Ptr.d_mem, numPixel );
}
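// Per-frame correlation: one block per frame, one thread per column of that frame.
// Pass 1 accumulates the masked per-thread sums of differenceImage and image plus
// the mask count into shared memory and tree-reduces them to get the two means;
// pass 2 accumulates the squared deviations and the cross term the same way and
// writes the resulting Pearson correlation for this frame to mem->corr[blockIdx.x].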
template<typename T>
__global__ void d_frameCorrelation( DeviceMemory<T>* mem ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ char s_mem[];
int idpx = idx;
T* buffA = reinterpret_cast<T*>( &s_mem[sizeof(T) * blockDim.x * 0] );
T* buffB = reinterpret_cast<T*>( &s_mem[sizeof(T) * blockDim.x * 1] );
int* buffC = reinterpret_cast<int*>( &s_mem[sizeof(T) * blockDim.x * 2] );
T* tbuffC = reinterpret_cast<T*>( buffC );
T blob[2]; blob[0] = 0;
buffA[threadIdx.x] = 0;
buffB[threadIdx.x] = 0;
buffC[threadIdx.x] = 0;
for( int i = 0; i < mem->frameH; i++ ) {
int used = mem->mask[idpx];
buffC[threadIdx.x] += used;
blob[1] = mem->image[idpx];
buffB[threadIdx.x] += blob[used];
blob[1] = mem->differenceImage[idpx]; //changed from wI
buffA[threadIdx.x] += blob[used];
idpx += mem->fframeW;
}
::d_blockReduce<T>( buffA, mem->groupSize2 );
::d_blockReduce<T>( buffB, mem->groupSize2 );
::d_blockReduce<int>( buffC, mem->groupSize2 );
//buffA is actually differenceImage not wI
//differenceImage = image - wI;
//sum(wI) = sum(image) - sum(differenceImage)
T divSumA = buffA[0] / static_cast<T>( buffC[0] );
T divSumB = buffB[0] / static_cast<T>( buffC[0] );
T assq = 0;
T bssq = 0;
T q = 0;
blob[1] = 1.0;
idpx = idx;
for( int i = 0; i < mem->frameH; i++ ) {
T va = ( mem->differenceImage[idpx] - divSumA ) * blob[mem->mask[idpx]];
assq += va * va;
T vb = ( mem->image[idpx] - divSumB ) * blob[mem->mask[idpx]];
bssq += vb * vb;
q += va * vb;
idpx += mem->fframeW;
}
buffA[threadIdx.x] = assq;
buffB[threadIdx.x] = bssq;
tbuffC[threadIdx.x] = q;
__syncthreads();
::d_blockReduce<T>( buffA, mem->groupSize2 );
::d_blockReduce<T>( buffB, mem->groupSize2 );
::d_blockReduce<T>( tbuffC, mem->groupSize2 );
mem->corr[blockIdx.x] = tbuffC[0] / sqrt( buffA[0] * buffB[0] );
}
template<typename T>
void hd_frameCorrelation( DeviceMemory<T>& d_Ptr ) {
int numPixel = d_Ptr.frameH * d_Ptr.fframeW;
int numBlocks = (numPixel + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
d_swapWIDiff<T><<< numBlocks, THREADS_PER_BLOCK >>>( d_Ptr.d_mem, numPixel );
d_frameCorrelation<<< d_Ptr.nFrames, d_Ptr.frameW, d_Ptr.frameW * sizeof( T ) * 3 >>>( d_Ptr.d_mem );
d_swapWIDiff<T><<< numBlocks, THREADS_PER_BLOCK >>>( d_Ptr.d_mem, numPixel );
}
template void hd_globalCorrelation( DeviceMemory<float>& d_Ptr, float* corr );
template void hd_globalCorrelation( DeviceMemory<double>& d_Ptr, double* corr );
template void hd_frameCorrelation( DeviceMemory<float>& mem );
template void hd_frameCorrelation( DeviceMemory<double>& mem );
|
80162fc8eb81b39cde83b66f8dd0d07705cc5cc7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mapScan.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
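// Benchmark harness: for every matrix size / block shape pair it rounds the grid up
// to cover the matrix, performs one synchronized launch plus 10 untimed warm-up
// launches of mapScan, then times 1000 launches with steady_clock (note: there is
// no device sync after the timed loop) and prints
// [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].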
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_array = NULL;
hipMalloc(&d_array, XSIZE*YSIZE*sizeof(unsigned int)); // room for XSIZE*YSIZE unsigned ints, not just bytes
unsigned int *d_total = NULL;
hipMalloc(&d_total, XSIZE*YSIZE*sizeof(unsigned int));
size_t n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( mapScan), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,d_total,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( mapScan), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,d_total,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( mapScan), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,d_total,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
80162fc8eb81b39cde83b66f8dd0d07705cc5cc7.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mapScan.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
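// Benchmark harness: for every matrix size / block shape pair it rounds the grid up
// to cover the matrix, performs one synchronized launch plus 10 untimed warm-up
// launches of mapScan, then times 1000 launches with steady_clock (note: there is
// no device sync after the timed loop) and prints
// [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].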
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_array = NULL;
cudaMalloc(&d_array, XSIZE*YSIZE*sizeof(unsigned int)); // room for XSIZE*YSIZE unsigned ints, not just bytes
unsigned int *d_total = NULL;
cudaMalloc(&d_total, XSIZE*YSIZE*sizeof(unsigned int));
size_t n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mapScan<<<gridBlock,threadBlock>>>(d_array,d_total,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mapScan<<<gridBlock,threadBlock>>>(d_array,d_total,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mapScan<<<gridBlock,threadBlock>>>(d_array,d_total,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
785d2f03e625367f6cd1f52d7e22038a3f1ba6c1.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* EXAMPLE OF MAPPING THREADS TO MULTIDIMENSIONAL DATA: CHAPTER 3
*/
#include"lodepng.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define CHECK_ERROR(call) { \
hipError_t err = call; \
if (err != hipSuccess) { \
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define CHANNELS 4
// input image is encoded as unsigned characters [0,255]
__global__
void colorToGrayscaleConversionKernel(unsigned char *Pin, unsigned char *Pout, int width, int height) {
int Col = blockDim.x * blockIdx.x + threadIdx.x;
int Row = blockDim.y * blockIdx.y + threadIdx.y;
// check that only threads with both Row and Col values within range do any work
if ( Col < width && Row < height) {
// get 1D coordinate for the grayscale image
int greyOffset = Row * width + Col;
// one can think of the RGB image as having
// CHANNELS times more columns than the grayscale image
int rgbOffset = greyOffset * CHANNELS;
unsigned char r = Pin[rgbOffset ]; // red value for pixel
unsigned char g = Pin[rgbOffset + 1]; // green value for pixel
unsigned char b = Pin[rgbOffset + 2]; // blue value for pixel
// perform the rescaling and store it
// we multiply by floating point constants
Pout[rgbOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
Pout[rgbOffset+1] = 0.21f*r + 0.71f*g + 0.07f*b;
Pout[rgbOffset+2] = 0.21f*r + 0.71f*g + 0.07f*b;
Pout[rgbOffset+3] = 255;
}
}
void colorToGrayscaleConversion(unsigned char *h_Pin, unsigned char *h_Pout, int m, int n) {
int size = (m*n*4)*sizeof(unsigned char);
unsigned char *d_Pin, *d_Pout;
//1. Allocate global memory on the device for d_Pin and d_Pout
// With this type of allocation it isn't possible to access elements using higher-dimensional indexing syntax;
// the index needs to be linearized first.
CHECK_ERROR(hipMalloc((void**)&d_Pin, size));
CHECK_ERROR(hipMalloc((void**)&d_Pout, size));
// copy h_Pin to device memory
hipMemcpy(d_Pin, h_Pin, size, hipMemcpyHostToDevice);
//2. Kernel launch code - with 256 threads per block
dim3 dimGrid(ceil(m / 16.0),ceil(n / 16.0),1);
dim3 dimBlock(16, 16,1);
hipLaunchKernelGGL(( colorToGrayscaleConversionKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Pin, d_Pout, m, n);
//3. copy d_Pout from the device memory
printf("coping d_Pout from the device memory to host..\n");
hipMemcpy(h_Pout, d_Pout, size, hipMemcpyDeviceToHost);
printf("copied..\n");
// Free device vectors
hipFree(d_Pin);
hipFree(d_Pout);
}
/*
Decode from disk to raw pixels
*/
unsigned char* decodeOneStep(const char* filename)
{
unsigned error;
unsigned char* image;
unsigned width, height;
error = lodepng_decode32_file(&image, &width, &height, filename);
if(error) printf("error %u: %s\n", error, lodepng_error_text(error));
return image;
}
/*
Encode from raw pixels to disk with a single function call
The image argument has width * height RGBA pixels or width * height * 4 bytes
*/
void encodeOneStep(const char* filename, unsigned char* image, int width, int height)
{
/*Encode the image*/
unsigned error = lodepng_encode32_file(filename, image, width, height);
/*if there's an error, display it*/
if(error) printf("error %u: %s\n", error, lodepng_error_text(error));
}
int main(int argc, char *argv[]) {
/* argv[1] must be the name of the image file */
if (argc != 2) {
printf("Usage: ./<executable_file>.x <name_of_image_file>\n");
exit(1);
}
const char *filename = argv[1];
// create host vectors
unsigned char *h_Pin, *h_Pout;
int m = 512; // track the pixel in x direction
int n = 512; // track the pixel in y direction
// allocate memory for the output host vector; h_Pin is allocated by decodeOneStep below
h_Pout = (unsigned char*)malloc(sizeof(unsigned char)*(n*m*4));
// decode the .png image
printf("decoding image...\n");
h_Pin = decodeOneStep(filename);
printf("colorToGrayscaleConversion...\n");
//GpuTimer timer;
//timer.Start();
colorToGrayscaleConversion(h_Pin, h_Pout, m, n);
//timer.Stop();
printf("encoding converted image...\n");
encodeOneStep("image_converted.png", h_Pout, m, n);
printf("ok conversion completed with success!\n");
// Free host memory
free(h_Pin);
free(h_Pout);
return 0;
}
|
785d2f03e625367f6cd1f52d7e22038a3f1ba6c1.cu
|
/*
* EXAMPLE OF MAPPING THREADS TO MULTIDIMENSIONAL DATA: CHAPTER 3
*/
#include"lodepng.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <math.h>
#define CHECK_ERROR(call) { \
cudaError_t err = call; \
if (err != cudaSuccess) { \
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define CHANNELS 4
// input image is encoded as unsigned characters [0,255]
__global__
void colorToGrayscaleConversionKernel(unsigned char *Pin, unsigned char *Pout, int width, int height) {
int Col = blockDim.x * blockIdx.x + threadIdx.x;
int Row = blockDim.y * blockIdx.y + threadIdx.y;
// check that only threads with both Row and Col values within range do any work
if ( Col < width && Row < height) {
// get 1D coordinate for the grayscale image
int greyOffset = Row * width + Col;
// one can think of the RGB image as having
// CHANNELS times more columns than the grayscale image
int rgbOffset = greyOffset * CHANNELS;
unsigned char r = Pin[rgbOffset ]; // red value for pixel
unsigned char g = Pin[rgbOffset + 1]; // green value for pixel
unsigned char b = Pin[rgbOffset + 2]; // blue value for pixel
// perform the rescaling and store it
// we multiply by floating point constants
Pout[rgbOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
Pout[rgbOffset+1] = 0.21f*r + 0.71f*g + 0.07f*b;
Pout[rgbOffset+2] = 0.21f*r + 0.71f*g + 0.07f*b;
Pout[rgbOffset+3] = 255;
}
}
void colorToGrayscaleConversion(unsigned char *h_Pin, unsigned char *h_Pout, int m, int n) {
int size = (m*n*4)*sizeof(unsigned char);
unsigned char *d_Pin, *d_Pout;
//1. Allocate global memory on the device for d_Pin and d_Pout
// With this type of allocation it isn't possible to access elements using higher-dimensional indexing syntax;
// the index needs to be linearized first.
CHECK_ERROR(cudaMalloc((void**)&d_Pin, size));
CHECK_ERROR(cudaMalloc((void**)&d_Pout, size));
// copy h_Pin to device memory
cudaMemcpy(d_Pin, h_Pin, size, cudaMemcpyHostToDevice);
//2. Kernel launch code - with 256 threads per block
dim3 dimGrid(ceil(m / 16.0),ceil(n / 16.0),1);
dim3 dimBlock(16, 16,1);
colorToGrayscaleConversionKernel<<<dimGrid, dimBlock>>>(d_Pin, d_Pout, m, n);
//3. copy d_Pout from the device memory
printf("coping d_Pout from the device memory to host..\n");
cudaMemcpy(h_Pout, d_Pout, size, cudaMemcpyDeviceToHost);
printf("copied..\n");
// Free device vectors
cudaFree(d_Pin);
cudaFree(d_Pout);
}
/*
Decode from disk to raw pixels
*/
unsigned char* decodeOneStep(const char* filename)
{
unsigned error;
unsigned char* image;
unsigned width, height;
error = lodepng_decode32_file(&image, &width, &height, filename);
if(error) printf("error %u: %s\n", error, lodepng_error_text(error));
return image;
}
/*
Encode from raw pixels to disk with a single function call
The image argument has width * height RGBA pixels or width * height * 4 bytes
*/
void encodeOneStep(const char* filename, unsigned char* image, int width, int height)
{
/*Encode the image*/
unsigned error = lodepng_encode32_file(filename, image, width, height);
/*if there's an error, display it*/
if(error) printf("error %u: %s\n", error, lodepng_error_text(error));
}
int main(int argc, char *argv[]) {
/* argv[1] must be the name of the image file */
if (argc != 2) {
printf("Usage: ./<executable_file>.x <name_of_image_file>\n");
exit(1);
}
const char *filename = argv[1];
// create host vectors
unsigned char *h_Pin, *h_Pout;
int m = 512; // track the pixel in x direction
int n = 512; // track the pixel in y direction
// allocate memory for the output host vector; h_Pin is allocated by decodeOneStep below
h_Pout = (unsigned char*)malloc(sizeof(unsigned char)*(n*m*4));
// decode the .png image
printf("decoding image...\n");
h_Pin = decodeOneStep(filename);
printf("colorToGrayscaleConversion...\n");
//GpuTimer timer;
//timer.Start();
colorToGrayscaleConversion(h_Pin, h_Pout, m, n);
//timer.Stop();
printf("encoding converted image...\n");
encodeOneStep("image_converted.png", h_Pout, m, n);
printf("ok conversion completed with success!\n");
// Free host memory
free(h_Pin);
free(h_Pout);
return 0;
}
|
ef1fca28518038af0aa54782d1404ccc65b63514.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gpusort.h"
#include "funcs.cu"
int median(int a, int b, int c) {
if(a < b) {
if(b < c) return b;
else if(a < c) return c;
else return a;
} else {
if(c > a) return a;
else if(c > b) return c;
else return b;
}
}
int Init() {
if(hipHostMalloc((void **)&workset, MAXBLOCKS*sizeof(sequence)) != hipSuccess) return -1;
if(hipHostMalloc((void **)&doneset, MAXBLOCKS*sizeof(sequence)) != hipSuccess) return -2;
if(hipHostMalloc((void **)¶ms, MAXBLOCKS*sizeof(param)) != hipSuccess) return -3;
if(hipHostMalloc((void **)&plist, sizeof(list)) != hipSuccess) return -4;
if(hipMalloc((void **)&dparams, MAXBLOCKS*sizeof(param)) != hipSuccess) return -5;
if(hipMalloc((void **)&dplist, sizeof(list)) != hipSuccess) return -6;
return 0;
}
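// Two-phase GPU quicksort skeleton: the outer loop repeatedly partitions the large
// sequences in workset around a pivot using the GQSORT1/GQSORT2 kernels (launch
// configurations are still TODO placeholders), moving sequences that have become
// small enough into doneset; once at most MAXSEQ sequences remain, every sequence
// is handed to the per-block LQSORT kernel for final sorting.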
void GPUSORT(int size, int *array, int *darray1, int *darray2) {
bool flip = true;
int worksize = 1, donesize = 0, paramsize = 0, totsize = size;
int pivot = workset[0].pivot = median(darray1[0], darray1[size/2], darray1[size-1]), index;
while(worksize > 0 && worksize + donesize < MAXSEQ) {
int blocksize = totsize / MAXSEQ;
for(int i = 0; i < worksize; i ++) {
if(workset[i].end - workset[i].begin < size/MAXSEQ) continue;
int blockcount = (totsize + blocksize)/blocksize;
int parent = i, bstart;
for(int j = 0; j < blockcount; j ++) {
bstart = workset[i].begin + j * blocksize;
params[paramsize].begin = bstart;
params[paramsize].end = bstart + blocksize;
params[paramsize].pivot = workset[i].pivot;
params[paramsize].parent = i;
params[paramsize].last = false;
paramsize ++;
}
params[paramsize-1].end = workset[i].end;
params[paramsize-1].last = true;
}
hipMemcpy(dparams, params, paramsize * sizeof(param), hipMemcpyHostToDevice);
GQSORT1<<< TODO >>>(dparams, TODO);
hipMemcpy(plist, dplist, sizeof(list), hipMemcpyDeviceToHost);
for(int i = 0; i < paramsize; i ++) {
int l = plist->blockleft[i];
int r = plist->blockright[i];
plist->blockleft[i] = workset[params[i].parent].begin;
plist->blockright[i] = workset[params[i].parent].end;
workset[params[i].parent].begin += l;
workset[params[i].parent].end -= r;
workset[params[i].parent].maxrpiv = max(workset[params[i].parent].maxrpiv, plist->blockmax[i]);
workset[params[i].parent].minlpiv = min(workset[params[i].parent].minlpiv, plist->blockmin[i]);
workset[params[i].parent].maxlpiv = min(workset[params[i].parent].maxlpiv, workset[params[i].parent].pivot);
workset[params[i].parent].minrpiv = max(workset[params[i].parent].minrpiv, workset[params[i].parent].pivot);
}
GQSORT2<<< TODO >>>(dparams, TODO);
flip = !flip;
int oldworksize = worksize, *darray = flip ? darray1 : darray2, b, e;
totsize = 0, paramsize =0, worksize = 0;
for(int i = 0; i < oldworksize; i ++) {
if(workset[i].begin - workset[i].orgbegin < size/MAXSEQ) {
b = doneset[donesize].begin = workset[i].orgbegin;
e = doneset[donesize].end = workset[i].begin;
doneset[donesize].pivot = (workset[i].maxlpiv + workset[i].minlpiv)/2;
doneset[donesize].flip = flip;
donesize ++;
} else {
totsize += workset[i].begin - workset[i].orgbegin;
b = params[worksize].begin = workset[i].begin;
e = params[worksize].end = workset[i].end;
params[worksize].pivot = (workset[i].maxlpiv + workset[i].minlpiv)/2;
worksize ++;
}
if(workset[i].orgend - workset[i].end < size/MAXSEQ) {
b = doneset[donesize].begin = workset[i].end;
e = doneset[donesize].end = workset[i].orgend;
doneset[donesize].pivot = (workset[i].maxrpiv + workset[i].minrpiv)/2;
doneset[donesize].flip = flip;
donesize ++;
} else {
totsize += workset[i].orgend - workset[i].end;
b = params[worksize].begin = workset[i].end;
e = params[worksize].end = workset[i].orgend;
params[worksize].pivot = (workset[i].maxrpiv + workset[i].minrpiv)/2;
worksize ++;
}
}
for(int i = 0; i < worksize; i ++) {
workset[i].orgbegin = workset[i].begin = params[i].begin;
workset[i].orgend = workset[i].end = params[i].end;
workset[i].pivot = params[i].pivot;
workset[i].flip = flip;
}
}
int lqparamsize = 0;
for(int i = 0; i < worksize; i ++) {
lqparams[lqparamsize].begin = workset[i].begin;
lqparams[lqparamsize].end = workset[i].end;
lqparams[lqparamsize].flip = workset[i].flip;
lqparams[lqparamsize].sbsize = sbsize;
lqparamsize ++;
}
for(int i = 0; i < donesize; i ++) {
lqparams[lqparamsize].begin = doneset[i].begin;
lqparams[lqparamsize].end = doneset[i].end;
lqparams[lqparamsize].flip = doneset[i].flip;
lqparams[lqparamsize].sbsize = sbsize;
lqparamsize ++;
}
hipMemcpy(dlqparams, lqparams, lqparamsize * sizeof(lqparam), hipMemcpyHostToDevice);
LQSORT<<< TODO >>>(dlqparams, TODO);
hipMemcpy(lqparams, dlqparams, lqparamsize * sizeof(lqparam), hipMemcpyDeviceToHost);
}
void Destroy() {
hipHostFree(workset);
hipHostFree(doneset);
hipHostFree(params);
hipFree(dparams);
hipFree(dplist);
}
|
ef1fca28518038af0aa54782d1404ccc65b63514.cu
|
#include "gpusort.h"
#include "funcs.cu"
int median(int a, int b, int c) {
if(a < b) {
if(b < c) return b;
else if(a < c) return c;
else return a;
} else {
if(c > a) return a;
else if(c > b) return c;
else return b;
}
}
int Init() {
if(cudaMallocHost((void **)&workset, MAXBLOCKS*sizeof(sequence)) != cudaSuccess) return -1;
if(cudaMallocHost((void **)&doneset, MAXBLOCKS*sizeof(sequence)) != cudaSuccess) return -2;
if(cudaMallocHost((void **)¶ms, MAXBLOCKS*sizeof(param)) != cudaSuccess) return -3;
if(cudaMallocHost((void **)&plist, sizeof(list)) != cudaSuccess) return -4;
if(cudaMalloc((void **)&dparams, MAXBLOCKS*sizeof(param)) != cudaSuccess) return -5;
if(cudaMalloc((void **)&dplist, sizeof(list)) != cudaSuccess) return -6;
return 0;
}
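// Two-phase GPU quicksort skeleton: the outer loop repeatedly partitions the large
// sequences in workset around a pivot using the GQSORT1/GQSORT2 kernels (launch
// configurations are still TODO placeholders), moving sequences that have become
// small enough into doneset; once at most MAXSEQ sequences remain, every sequence
// is handed to the per-block LQSORT kernel for final sorting.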
void GPUSORT(int size, int *array, int *darray1, int *darray2) {
bool flip = true;
int worksize = 1, donesize = 0, paramsize = 0, totsize = size;
int pivot = workset[0].pivot = median(darray1[0], darray1[size/2], darray1[size-1]), index;
while(worksize > 0 && worksize + donesize < MAXSEQ) {
int blocksize = totsize / MAXSEQ;
for(int i = 0; i < worksize; i ++) {
if(workset[i].end - workset[i].begin < size/MAXSEQ) continue;
int blockcount = (totsize + blocksize)/blocksize;
int parent = i, bstart;
for(int j = 0; j < blockcount; j ++) {
bstart = workset[i].begin + j * blocksize;
params[paramsize].begin = bstart;
params[paramsize].end = bstart + blocksize;
params[paramsize].pivot = workset[i].pivot;
params[paramsize].parent = i;
params[paramsize].last = false;
paramsize ++;
}
params[paramsize-1].end = workset[i].end;
params[paramsize-1].last = true;
}
cudaMemcpy(dparams, params, paramsize * sizeof(param), cudaMemcpyHostToDevice);
GQSORT1<<< TODO >>>(dparams, TODO);
cudaMemcpy(plist, dplist, sizeof(list), cudaMemcpyDeviceToHost);
for(int i = 0; i < paramsize; i ++) {
int l = plist->blockleft[i];
int r = plist->blockright[i];
plist->blockleft[i] = workset[params[i].parent].begin;
plist->blockright[i] = workset[params[i].parent].end;
workset[params[i].parent].begin += l;
workset[params[i].parent].end -= r;
workset[params[i].parent].maxrpiv = max(workset[params[i].parent].maxrpiv, plist->blockmax[i]);
workset[params[i].parent].minlpiv = min(workset[params[i].parent].minlpiv, plist->blockmin[i]);
workset[params[i].parent].maxlpiv = min(workset[params[i].parent].maxlpiv, workset[params[i].parent].pivot);
workset[params[i].parent].minrpiv = max(workset[params[i].parent].minrpiv, workset[params[i].parent].pivot);
}
GQSORT2<<< TODO >>>(dparams, TODO);
flip = !flip;
int oldworksize = worksize, *darray = flip ? darray1 : darray2, b, e;
totsize = 0, paramsize =0, worksize = 0;
for(int i = 0; i < oldworksize; i ++) {
if(workset[i].begin - workset[i].orgbegin < size/MAXSEQ) {
b = doneset[donesize].begin = workset[i].orgbegin;
e = doneset[donesize].end = workset[i].begin;
doneset[donesize].pivot = (workset[i].maxlpiv + workset[i].minlpiv)/2;
doneset[donesize].flip = flip;
donesize ++;
} else {
totsize += workset[i].begin - workset[i].orgbegin;
b = params[worksize].begin = workset[i].begin;
e = params[worksize].end = workset[i].end;
params[worksize].pivot = (workset[i].maxlpiv + workset[i].minlpiv)/2;
worksize ++;
}
if(workset[i].orgend - workset[i].end < size/MAXSEQ) {
b = doneset[donesize].begin = workset[i].end;
e = doneset[donesize].end = workset[i].orgend;
doneset[donesize].pivot = (workset[i].maxrpiv + workset[i].minrpiv)/2;
doneset[donesize].flip = flip;
donesize ++;
} else {
totsize += workset[i].orgend - workset[i].end;
b = params[worksize].begin = workset[i].end;
e = params[worksize].end = workset[i].orgend;
params[worksize].pivot = (workset[i].maxrpiv + workset[i].minrpiv)/2;
worksize ++;
}
}
for(int i = 0; i < worksize; i ++) {
workset[i].orgbegin = workset[i].begin = params[i].begin;
workset[i].orgend = workset[i].end = params[i].end;
workset[i].pivot = params[i].pivot;
workset[i].flip = flip;
}
}
int lqparamsize = 0;
for(int i = 0; i < worksize; i ++) {
lqparams[lqparamsize].begin = workset[i].begin;
lqparams[lqparamsize].end = workset[i].end;
lqparams[lqparamsize].flip = workset[i].flip;
lqparams[lqparamsize].sbsize = sbsize;
lqparamsize ++;
}
for(int i = 0; i < donesize; i ++) {
lqparams[lqparamsize].begin = doneset[i].begin;
lqparams[lqparamsize].end = doneset[i].end;
lqparams[lqparamsize].flip = doneset[i].flip;
lqparams[lqparamsize].sbsize = sbsize;
lqparamsize ++;
}
cudaMemcpy(dlqparams, lqparams, lqparamsize * sizeof(lqparam), cudaMemcpyHostToDevice);
LQSORT<<< TODO >>>(dlqparams, TODO);
cudaMemcpy(lqparams, dlqparams, lqparamsize * sizeof(lqparam), cudaMemcpyDeviceToHost);
}
void Destroy() {
cudaFreeHost(workset);
cudaFreeHost(doneset);
cudaFreeHost(params);
cudaFree(dparams);
cudaFree(dplist);
}
|
1be62b6523042e3a85bb5f0e39a92d3d09d8580f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 64
#define N 32
#define H 224
#define W 224
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
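// Thin cuDNN wrapper: initialize() builds the input/kernel/convolution/output
// descriptors for a 1 x C x H x W NCHW input and an N x C x R x S filter (pad 1,
// stride 1), queries the workspace needed by the IMPLICIT_GEMM forward algorithm
// and uploads an all-ones kernel; forward() then runs cudnnConvolutionForward into
// the preallocated output buffer. ConvWinogradeNon and ConvFFT below are the same
// wrapper using the Winograd-nonfused and FFT algorithms.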
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[128];
__shared__ float pad_temp_shared[2088];
__shared__ float kernel_shared[288];
float pad_temp_shared_local[48];
float kernel_shared_local[12];
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
for (int yy_c_init = 0; yy_c_init < 4; ++yy_c_init) {
compute_local[(((ff_c_init * 4) + yy_c_init))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 64))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 8))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 72))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 16))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 80))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 24))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 88))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 32))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 96))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 40))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 104))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 48))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 112))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 56))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 120))] = 0.000000e+00f;
}
}
for (int rc_outer = 0; rc_outer < 32; ++rc_outer) {
__syncthreads();
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 19; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) < 116) {
if (((((((int)threadIdx.z) * 522) + (((int)threadIdx.y) * 38)) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 2088) {
if ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 522) {
pad_temp_shared[(((((((int)threadIdx.z) * 522) + (((int)threadIdx.y) * 38)) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 56) + (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58))) && (((((int)blockIdx.y) * 56) + (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58)) < 225)) && (1 <= ((((int)blockIdx.x) * 16) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)))) && (((((int)blockIdx.x) * 16) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) < 225)) ? data[((((((((rc_outer * 100352) + ((((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) / 58) * 50176)) + (((int)blockIdx.y) * 12544)) + ((((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58) * 224)) + (((int)blockIdx.x) * 16)) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) - 225))] : 0.000000e+00f);
}
}
}
}
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 3; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6)) < 16) {
if (((((int)threadIdx.z) * 8) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 3)) < 32) {
if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)) < 96) {
if (((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 288) {
if ((((((int)threadIdx.y) * 6) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 72) {
if ((((((int)blockIdx.z) * 16) + (((int)threadIdx.z) * 4)) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6)) < 32) {
kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6) * 576)) + (rc_outer * 18)) + ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) % 6) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))];
}
}
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 2; ++rc_inner_outer) {
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
for (int ax2 = 0; ax2 < 6; ++ax2) {
pad_temp_shared_local[(ax2)] = pad_temp_shared[((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer))];
pad_temp_shared_local[((ax2 + 6))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 2))];
pad_temp_shared_local[((ax2 + 12))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 4))];
pad_temp_shared_local[((ax2 + 18))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 6))];
pad_temp_shared_local[((ax2 + 24))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 8))];
pad_temp_shared_local[((ax2 + 30))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 10))];
pad_temp_shared_local[((ax2 + 36))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 12))];
pad_temp_shared_local[((ax2 + 42))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 14))];
}
for (int ax0 = 0; ax0 < 2; ++ax0) {
for (int ax21 = 0; ax21 < 3; ++ax21) {
kernel_shared_local[(((ax0 * 3) + ax21))] = kernel_shared[((((((((int)threadIdx.z) * 36) + (ax0 * 18)) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer))];
kernel_shared_local[((((ax0 * 3) + ax21) + 6))] = kernel_shared[(((((((((int)threadIdx.z) * 36) + (ax0 * 18)) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer) + 144))];
}
}
for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) {
for (int ff_c = 0; ff_c < 2; ++ff_c) {
for (int yy_c = 0; yy_c < 4; ++yy_c) {
compute_local[(((ff_c * 4) + yy_c))] = (compute_local[(((ff_c * 4) + yy_c))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 64))] = (compute_local[((((ff_c * 4) + yy_c) + 64))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 8))] = (compute_local[((((ff_c * 4) + yy_c) + 8))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 6))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 72))] = (compute_local[((((ff_c * 4) + yy_c) + 72))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 6))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 16))] = (compute_local[((((ff_c * 4) + yy_c) + 16))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 12))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 80))] = (compute_local[((((ff_c * 4) + yy_c) + 80))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 12))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 24))] = (compute_local[((((ff_c * 4) + yy_c) + 24))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 18))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 88))] = (compute_local[((((ff_c * 4) + yy_c) + 88))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 18))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 32))] = (compute_local[((((ff_c * 4) + yy_c) + 32))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 24))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 96))] = (compute_local[((((ff_c * 4) + yy_c) + 96))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 24))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 40))] = (compute_local[((((ff_c * 4) + yy_c) + 40))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 30))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 104))] = (compute_local[((((ff_c * 4) + yy_c) + 104))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 30))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 48))] = (compute_local[((((ff_c * 4) + yy_c) + 48))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 36))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 112))] = (compute_local[((((ff_c * 4) + yy_c) + 112))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 36))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 56))] = (compute_local[((((ff_c * 4) + yy_c) + 56))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 42))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 120))] = (compute_local[((((ff_c * 4) + yy_c) + 120))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 42))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
}
}
}
}
}
}
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
for (int yy_inner_inner_inner = 0; yy_inner_inner_inner < 4; ++yy_inner_inner_inner) {
compute[(((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)))] = compute_local[(((ff_inner_inner_inner * 4) + yy_inner_inner_inner))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401408))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 64))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 2))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 8))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401410))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 72))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 4))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 16))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401412))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 80))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 6))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 24))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401414))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 88))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 8))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 32))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401416))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 96))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 10))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 40))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401418))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 104))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 12))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 48))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401420))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 112))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 14))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 56))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401422))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 120))];
}
}
}
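// check_diff: host-side validation helper. Accumulates the absolute (L1)
// difference between two float buffers of length `size` using an OpenMP
// reduction.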
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += fabs(x[i] - y[i]);
}
return diff;
}
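// pad_input: writes x (CxHxW) into y (Cx(H+2)x(W+2)) with a one-element zero
// border per channel, matching the padding of the 3x3 convolution.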
void pad_input(float * x, float *y){
#pragma omp parallel for
for(unsigned int i=0;i<(H + 2)*(W+2)*C;++i){
y[i] = 0.0f;
}
#pragma omp parallel for
for(unsigned int c=0;c<C;++c){
for(unsigned int h=0;h<H;++h){
for(unsigned int w=0;w<W;++w){
unsigned int h_padded = h + 1;
unsigned int w_padded = w + 1;
y[c*(H+2)*(W+2) + h_padded*(W+2) + w_padded] = x[c*(H)*(W) + h*(W) + w];
}
}
}
}
int main(void){
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(14,4,2);
dim3 block(2,14,4);
float * paddedInputDevice;
chkerr(hipMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(hipMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), hipMemcpyHostToDevice));
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
|
1be62b6523042e3a85bb5f0e39a92d3d09d8580f.cu
|
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
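// Fixed problem size used throughout this benchmark: C input channels,
// N output channels (filters), HxW spatial dimensions, RxS filter window.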
#define C 64
#define N 32
#define H 224
#define W 224
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
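// Thin cuDNN wrappers. ConvGemm, ConvWinogradeNon and ConvFFT each set up the
// same 1xCxHxW -> 1xNxHxW convolution (3x3 filter, pad 1, stride 1) and differ
// only in the forward algorithm they request (IMPLICIT_GEMM,
// WINOGRAD_NONFUSED, FFT).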
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter elements (N*C*R*S)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter elements (N*C*R*S)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter elements (N*C*R*S)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
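// Auto-generated direct-convolution kernel (TVM-style naming) for the same
// problem. Each thread block stages an input tile into pad_temp_shared and a
// slice of the filter into kernel_shared, accumulates 128 partial results per
// thread in registers (compute_local), and writes them to the NCHW output.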
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[128];
__shared__ float pad_temp_shared[2088];
__shared__ float kernel_shared[288];
float pad_temp_shared_local[48];
float kernel_shared_local[12];
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
for (int yy_c_init = 0; yy_c_init < 4; ++yy_c_init) {
compute_local[(((ff_c_init * 4) + yy_c_init))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 64))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 8))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 72))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 16))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 80))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 24))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 88))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 32))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 96))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 40))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 104))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 48))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 112))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 56))] = 0.000000e+00f;
compute_local[((((ff_c_init * 4) + yy_c_init) + 120))] = 0.000000e+00f;
}
}
for (int rc_outer = 0; rc_outer < 32; ++rc_outer) {
__syncthreads();
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 19; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) < 116) {
if (((((((int)threadIdx.z) * 522) + (((int)threadIdx.y) * 38)) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 2088) {
if ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 522) {
pad_temp_shared[(((((((int)threadIdx.z) * 522) + (((int)threadIdx.y) * 38)) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 56) + (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58))) && (((((int)blockIdx.y) * 56) + (((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58)) < 225)) && (1 <= ((((int)blockIdx.x) * 16) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)))) && (((((int)blockIdx.x) * 16) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) < 225)) ? data[((((((((rc_outer * 100352) + ((((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) / 58) * 50176)) + (((int)blockIdx.y) * 12544)) + ((((((int)threadIdx.z) * 29) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 58) * 224)) + (((int)blockIdx.x) * 16)) + ((((((int)threadIdx.y) * 38) + (((int)threadIdx.x) * 19)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) - 225))] : 0.000000e+00f);
}
}
}
}
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 3; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6)) < 16) {
if (((((int)threadIdx.z) * 8) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 3)) < 32) {
if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.y) * 2)) + ((int)threadIdx.x)) < 96) {
if (((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 288) {
if ((((((int)threadIdx.y) * 6) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 72) {
if ((((((int)blockIdx.z) * 16) + (((int)threadIdx.z) * 4)) + (((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6)) < 32) {
kernel_shared[(((((((int)threadIdx.z) * 72) + (((int)threadIdx.y) * 6)) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 6) * 576)) + (rc_outer * 18)) + ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) % 6) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))];
}
}
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 2; ++rc_inner_outer) {
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
for (int ax2 = 0; ax2 < 6; ++ax2) {
pad_temp_shared_local[(ax2)] = pad_temp_shared[((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer))];
pad_temp_shared_local[((ax2 + 6))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 2))];
pad_temp_shared_local[((ax2 + 12))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 4))];
pad_temp_shared_local[((ax2 + 18))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 6))];
pad_temp_shared_local[((ax2 + 24))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 8))];
pad_temp_shared_local[((ax2 + 30))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 10))];
pad_temp_shared_local[((ax2 + 36))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 12))];
pad_temp_shared_local[((ax2 + 42))] = pad_temp_shared[(((((((rc_inner_outer * 1044) + (((int)threadIdx.y) * 72)) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer) + 14))];
}
for (int ax0 = 0; ax0 < 2; ++ax0) {
for (int ax21 = 0; ax21 < 3; ++ax21) {
kernel_shared_local[(((ax0 * 3) + ax21))] = kernel_shared[((((((((int)threadIdx.z) * 36) + (ax0 * 18)) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer))];
kernel_shared_local[((((ax0 * 3) + ax21) + 6))] = kernel_shared[(((((((((int)threadIdx.z) * 36) + (ax0 * 18)) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer) + 144))];
}
}
for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) {
for (int ff_c = 0; ff_c < 2; ++ff_c) {
for (int yy_c = 0; yy_c < 4; ++yy_c) {
compute_local[(((ff_c * 4) + yy_c))] = (compute_local[(((ff_c * 4) + yy_c))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 64))] = (compute_local[((((ff_c * 4) + yy_c) + 64))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 8))] = (compute_local[((((ff_c * 4) + yy_c) + 8))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 6))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 72))] = (compute_local[((((ff_c * 4) + yy_c) + 72))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 6))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 16))] = (compute_local[((((ff_c * 4) + yy_c) + 16))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 12))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 80))] = (compute_local[((((ff_c * 4) + yy_c) + 80))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 12))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 24))] = (compute_local[((((ff_c * 4) + yy_c) + 24))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 18))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 88))] = (compute_local[((((ff_c * 4) + yy_c) + 88))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 18))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 32))] = (compute_local[((((ff_c * 4) + yy_c) + 32))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 24))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 96))] = (compute_local[((((ff_c * 4) + yy_c) + 96))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 24))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 40))] = (compute_local[((((ff_c * 4) + yy_c) + 40))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 30))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 104))] = (compute_local[((((ff_c * 4) + yy_c) + 104))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 30))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 48))] = (compute_local[((((ff_c * 4) + yy_c) + 48))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 36))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 112))] = (compute_local[((((ff_c * 4) + yy_c) + 112))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 36))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
compute_local[((((ff_c * 4) + yy_c) + 56))] = (compute_local[((((ff_c * 4) + yy_c) + 56))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 42))] * kernel_shared_local[(((ff_c * 3) + ry_inner_inner))]));
compute_local[((((ff_c * 4) + yy_c) + 120))] = (compute_local[((((ff_c * 4) + yy_c) + 120))] + (pad_temp_shared_local[(((yy_c + ry_inner_inner) + 42))] * kernel_shared_local[((((ff_c * 3) + ry_inner_inner) + 6))]));
}
}
}
}
}
}
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
for (int yy_inner_inner_inner = 0; yy_inner_inner_inner < 4; ++yy_inner_inner_inner) {
compute[(((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)))] = compute_local[(((ff_inner_inner_inner * 4) + yy_inner_inner_inner))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401408))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 64))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 2))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 8))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401410))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 72))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 4))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 16))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401412))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 80))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 6))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 24))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401414))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 88))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 8))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 32))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401416))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 96))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 10))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 40))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401418))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 104))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 12))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 48))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401420))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 112))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 14))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 56))];
compute[((((((((((((int)blockIdx.z) * 802816) + (((int)threadIdx.z) * 100352)) + (ff_inner_inner_inner * 50176)) + (((int)blockIdx.y) * 12544)) + (((int)threadIdx.y) * 896)) + (yy_inner_inner_inner * 224)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 401422))] = compute_local[((((ff_inner_inner_inner * 4) + yy_inner_inner_inner) + 120))];
}
}
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += fabs(x[i] - y[i]);
}
return diff;
}
void pad_input(float * x, float *y){
#pragma omp parallel for
for(unsigned int i=0;i<(H + 2)*(W+2)*C;++i){
y[i] = 0.0f;
}
#pragma omp parallel for
for(unsigned int c=0;c<C;++c){
for(unsigned int h=0;h<H;++h){
for(unsigned int w=0;w<W;++w){
unsigned int h_padded = h + 1;
unsigned int w_padded = w + 1;
y[c*(H+2)*(W+2) + h_padded*(W+2) + w_padded] = x[c*(H)*(W) + h*(W) + w];
}
}
}
}
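// Benchmark driver: builds a random CxHxW input and an all-ones filter, runs
// the cuDNN GEMM/Winograd/FFT forward passes and the generated kernel on the
// same device input, times each with CUDA events, computes the L1 difference
// of the generated kernel's output against the cuDNN GEMM result, and prints
// one CSV line with the timings and relative speedups.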
int main(void){
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(14,4,2);
dim3 block(2,14,4);
float * paddedInputDevice;
chkerr(cudaMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(cudaMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), cudaMemcpyHostToDevice));
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
|
f86b0797840e717595adaa3091f08d1608561a96.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include "utilities/graph_utils.cuh"
#include "cugraph.h"
#include "rmm_utils.h"
#include "utilities/error_utils.h"
namespace cugraph {
namespace detail {
// Volume of neighbors (*weight_s)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_row_sum(IdxType n,
IdxType *csrPtr,
IdxType *csrInd,
ValType *v,
ValType *work) {
IdxType row, start, end, length;
ValType sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y;
row < n;
row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
//compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0)
work[row] = sum;
}
else {
work[row] = (ValType) length;
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_is(IdxType n,
IdxType *csrPtr,
IdxType *csrInd,
ValType *v,
ValType *work,
ValType *weight_i,
ValType *weight_s) {
IdxType i, j, row, col, Ni, Nj;
IdxType ref, cur, ref_col, cur_col, match;
ValType ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z;
row < n;
row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y;
j < csrPtr[row + 1];
j += gridDim.y * blockDim.y) {
col = csrInd[j];
//find which row has the fewest elements (and call it the reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[j] = min(work[row], work[col]);
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
}
else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
IdxType left = csrPtr[cur];
IdxType right = csrPtr[cur + 1] - 1;
while (left <= right) {
IdxType middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[j], ref_val);
}
}
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
// Using list of node pairs
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_is_pairs(IdxType num_pairs,
IdxType *csrPtr,
IdxType *csrInd,
IdxType *first_pair,
IdxType *second_pair,
ValType *v,
ValType *work,
ValType *weight_i,
ValType *weight_s) {
IdxType i, idx, row, col, Ni, Nj;
IdxType ref, cur, ref_col, cur_col, match;
ValType ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z;
idx < num_pairs;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
//find which row has the fewest elements (and call it the reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[idx] = min(work[row], work[col]);
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x;
i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
}
else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
IdxType left = csrPtr[cur];
IdxType right = csrPtr[cur + 1] - 1;
while (left <= right) {
IdxType middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[idx], ref_val);
}
}
}
}
//Overlap weights (*weight_j)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_jw(IdxType e,
IdxType *csrPtr,
IdxType *csrInd,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
IdxType j;
ValType Wi, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x;
j < e;
j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Wu = weight_s[j];
weight_j[j] = (Wi / Wu);
}
}
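// Host driver: computes per-edge overlap scores in three kernel launches:
// (1) overlap_row_sum fills work[] with each row's (weighted) degree,
// (2) overlap_is accumulates the intersection volume into weight_i[] and sets
//     weight_s[] = min(work[row], work[col]) for every edge, and
// (3) overlap_jw writes the overlap coefficient weight_j = weight_i / weight_s,
//     i.e. |intersection(N(u),N(v))| / min(|N(u)|,|N(v)|) in the unweighted case.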
template<bool weighted, typename IdxType, typename ValType>
int overlap(IdxType n,
IdxType e,
IdxType *csrPtr,
IdxType *csrInd,
ValType *weight_in,
ValType *work,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS);
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( overlap_row_sum<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work);
hipDeviceSynchronize();
fill(e, weight_i, (ValType) 0.0);
//setup launch configuration
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1;
//launch kernel
hipLaunchKernelGGL(( overlap_is<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration
nthreads.x = min(e, (IdxType) CUDA_MAX_KERNEL_THREADS);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( overlap_jw<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, e,
csrPtr,
csrInd,
weight_i,
weight_s,
weight_j);
return 0;
}
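// Pair-list variant of the driver above: identical pipeline, but the
// intersection kernel iterates over an explicit (first_pair, second_pair)
// list instead of over every CSR edge.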
template<bool weighted, typename IdxType, typename ValType>
int overlap_pairs(IdxType n,
IdxType num_pairs,
IdxType *csrPtr,
IdxType *csrInd,
IdxType *first_pair,
IdxType *second_pair,
ValType *weight_in,
ValType *work,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS);
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( overlap_row_sum<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work);
hipDeviceSynchronize();
fill(num_pairs, weight_i, (ValType) 0.0);
//setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1;
//launch kernel
hipLaunchKernelGGL(( overlap_is_pairs<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, num_pairs,
csrPtr,
csrInd,
first_pair,
second_pair,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration
nthreads.x = min(num_pairs, (IdxType) CUDA_MAX_KERNEL_THREADS);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( overlap_jw<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, num_pairs,
csrPtr,
csrInd,
weight_i,
weight_s,
weight_j);
return 0;
}
} //namespace
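// Public entry point: validates the inputs, dispatches on the index/value
// dtypes and on whether edge weights were supplied, allocates the temporary
// weight_i / weight_s / work buffers through RMM, and runs the templated
// driver. Usage sketch (assuming a Graph with a populated CSR adjList and a
// result column of length |E| with dtype GDF_FLOAT32):
//
//   gdf_column result;                           // hypothetical, set up elsewhere
//   cugraph::overlap(&graph, nullptr, &result);  // unweighted overlap score per edge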
void overlap(Graph *graph, gdf_column *weights, gdf_column *result) {
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!result->valid, "Column must be valid");
bool weighted = (weights != nullptr);
gdf_dtype ValueType = result->dtype;
gdf_dtype IndexType = graph->adjList->offsets->dtype;
void *csrPtr = graph->adjList->offsets->data;
void *csrInd = graph->adjList->indices->data;
void *weight_i = nullptr;
void *weight_s = nullptr;
void *weight_j = result->data;
void *work = nullptr;
void *weight_in = nullptr;
if (weighted)
weight_in = weights->data;
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap<true, int32_t, float>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap<false, int32_t, float>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap<true, int32_t, double>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap<false, int32_t, double>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap<true, int64_t, float>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap<false, int64_t, float>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap<true, int64_t, double>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap<false, int64_t, double>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
// Clean up temp arrays
ALLOC_FREE_TRY(weight_i, nullptr);
ALLOC_FREE_TRY(weight_s, nullptr);
ALLOC_FREE_TRY(work, nullptr);
}
void overlap_list(Graph* graph,
gdf_column* weights,
gdf_column* first,
gdf_column* second,
gdf_column* result) {
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!result->valid, "Column must be valid");
CUGRAPH_EXPECTS(first != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(first->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!first->valid, "Column must be valid");
CUGRAPH_EXPECTS(second != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(second->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!second->valid, "Column must be valid");
bool weighted = (weights != nullptr);
gdf_dtype ValueType = result->dtype;
gdf_dtype IndexType = graph->adjList->offsets->dtype;
CUGRAPH_EXPECTS(first->dtype == IndexType, "Invalid API parameter");
CUGRAPH_EXPECTS(second->dtype == IndexType, "Invalid API parameter");
void *first_pair = first->data;
void *second_pair = second->data;
void *csrPtr = graph->adjList->offsets->data;
void *csrInd = graph->adjList->indices->data;
void *weight_i = nullptr;
void *weight_s = nullptr;
void *weight_j = result->data;
void *work = nullptr;
void *weight_in = nullptr;
if (weighted)
weight_in = weights->data;
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap_pairs<true, int32_t, float>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap_pairs<false, int32_t, float>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap_pairs<true, int32_t, double>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap_pairs<false, int32_t, double>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap_pairs<true, int64_t, float>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap_pairs<false, int64_t, float>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap_pairs<true, int64_t, double>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap_pairs<false, int64_t, double>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
// Clean up temp arrays
ALLOC_FREE_TRY(weight_i, nullptr);
ALLOC_FREE_TRY(weight_s, nullptr);
ALLOC_FREE_TRY(work, nullptr);
}
}
|
f86b0797840e717595adaa3091f08d1608561a96.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include "utilities/graph_utils.cuh"
#include "cugraph.h"
#include "rmm_utils.h"
#include "utilities/error_utils.h"
namespace cugraph {
namespace detail {
// Volume of neighbors (*weight_s)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_row_sum(IdxType n,
IdxType *csrPtr,
IdxType *csrInd,
ValType *v,
ValType *work) {
IdxType row, start, end, length;
ValType sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y;
row < n;
row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
//compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0)
work[row] = sum;
}
else {
work[row] = (ValType) length;
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_is(IdxType n,
IdxType *csrPtr,
IdxType *csrInd,
ValType *v,
ValType *work,
ValType *weight_i,
ValType *weight_s) {
IdxType i, j, row, col, Ni, Nj;
IdxType ref, cur, ref_col, cur_col, match;
ValType ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z;
row < n;
row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y;
j < csrPtr[row + 1];
j += gridDim.y * blockDim.y) {
col = csrInd[j];
//find which row has the fewest elements (and call it the reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[j] = min(work[row], work[col]);
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
}
else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
IdxType left = csrPtr[cur];
IdxType right = csrPtr[cur + 1] - 1;
while (left <= right) {
IdxType middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[j], ref_val);
}
}
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
// Using list of node pairs
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_is_pairs(IdxType num_pairs,
IdxType *csrPtr,
IdxType *csrInd,
IdxType *first_pair,
IdxType *second_pair,
ValType *v,
ValType *work,
ValType *weight_i,
ValType *weight_s) {
IdxType i, idx, row, col, Ni, Nj;
IdxType ref, cur, ref_col, cur_col, match;
ValType ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z;
idx < num_pairs;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
//find which row has the fewest elements (and call it the reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[idx] = min(work[row], work[col]);
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x;
i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
}
else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
IdxType left = csrPtr[cur];
IdxType right = csrPtr[cur + 1] - 1;
while (left <= right) {
IdxType middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[idx], ref_val);
}
}
}
}
// Overlap coefficient weights (*weight_j): intersection volume divided by the smaller of the two neighborhood volumes
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
overlap_jw(IdxType e,
IdxType *csrPtr,
IdxType *csrInd,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
IdxType j;
ValType Wi, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x;
j < e;
j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Wu = weight_s[j];
weight_j[j] = (Wi / Wu);
}
}
template<bool weighted, typename IdxType, typename ValType>
int overlap(IdxType n,
IdxType e,
IdxType *csrPtr,
IdxType *csrInd,
ValType *weight_in,
ValType *work,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS);
nblocks.z = 1;
//launch kernel
overlap_row_sum<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work);
cudaDeviceSynchronize();
fill(e, weight_i, (ValType) 0.0);
//setup launch configuration
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1;
//launch kernel
overlap_is<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration
nthreads.x = min(e, (IdxType) CUDA_MAX_KERNEL_THREADS);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
overlap_jw<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(e,
csrPtr,
csrInd,
weight_i,
weight_s,
weight_j);
return 0;
}
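// Illustrative sketch only (not from the library): invoking the unweighted
// single-precision instantiation above directly on raw device buffers. The names
// d_offsets, d_indices, d_work, d_wi, d_ws, d_wj are hypothetical device allocations
// of n+1, e, n, e, e and e elements respectively; passing nullptr for weight_in in the
// unweighted case mirrors what the gdf-based overlap() wrapper further down does.
//
//   cugraph::detail::overlap<false, int32_t, float>(
//       n, e, d_offsets, d_indices,
//       /*weight_in=*/nullptr, d_work, d_wi, d_ws, d_wj);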
template<bool weighted, typename IdxType, typename ValType>
int overlap_pairs(IdxType n,
IdxType num_pairs,
IdxType *csrPtr,
IdxType *csrInd,
IdxType *first_pair,
IdxType *second_pair,
ValType *weight_in,
ValType *work,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS);
nblocks.z = 1;
//launch kernel
overlap_row_sum<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work);
cudaDeviceSynchronize();
fill(num_pairs, weight_i, (ValType) 0.0);
//setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1;
//launch kernel
overlap_is_pairs<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(num_pairs,
csrPtr,
csrInd,
first_pair,
second_pair,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration
nthreads.x = min(num_pairs, (IdxType) CUDA_MAX_KERNEL_THREADS);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
overlap_jw<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(num_pairs,
csrPtr,
csrInd,
weight_i,
weight_s,
weight_j);
return 0;
}
} //namespace
void overlap(Graph *graph, gdf_column *weights, gdf_column *result) {
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!result->valid, "Column must be valid");
bool weighted = (weights != nullptr);
gdf_dtype ValueType = result->dtype;
gdf_dtype IndexType = graph->adjList->offsets->dtype;
void *csrPtr = graph->adjList->offsets->data;
void *csrInd = graph->adjList->indices->data;
void *weight_i = nullptr;
void *weight_s = nullptr;
void *weight_j = result->data;
void *work = nullptr;
void *weight_in = nullptr;
if (weighted)
weight_in = weights->data;
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap<true, int32_t, float>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap<false, int32_t, float>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap<true, int32_t, double>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap<false, int32_t, double>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap<true, int64_t, float>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap<false, int64_t, float>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap<true, int64_t, double>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap<false, int64_t, double>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
// Clean up temp arrays
ALLOC_FREE_TRY(weight_i, nullptr);
ALLOC_FREE_TRY(weight_s, nullptr);
ALLOC_FREE_TRY(work, nullptr);
}
void overlap_list(Graph* graph,
gdf_column* weights,
gdf_column* first,
gdf_column* second,
gdf_column* result) {
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!result->valid, "Column must be valid");
CUGRAPH_EXPECTS(first != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(first->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!first->valid, "Column must be valid");
CUGRAPH_EXPECTS(second != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(second->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!second->valid, "Column must be valid");
bool weighted = (weights != nullptr);
gdf_dtype ValueType = result->dtype;
gdf_dtype IndexType = graph->adjList->offsets->dtype;
CUGRAPH_EXPECTS(first->dtype == IndexType, "Invalid API parameter");
CUGRAPH_EXPECTS(second->dtype == IndexType, "Invalid API parameter");
void *first_pair = first->data;
void *second_pair = second->data;
void *csrPtr = graph->adjList->offsets->data;
void *csrInd = graph->adjList->indices->data;
void *weight_i = nullptr;
void *weight_s = nullptr;
void *weight_j = result->data;
void *work = nullptr;
void *weight_in = nullptr;
if (weighted)
weight_in = weights->data;
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap_pairs<true, int32_t, float>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap_pairs<false, int32_t, float>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap_pairs<true, int32_t, double>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap_pairs<false, int32_t, double>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap_pairs<true, int64_t, float>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::overlap_pairs<false, int64_t, float>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap_pairs<true, int64_t, double>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::overlap_pairs<false, int64_t, double>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
// Clean up temp arrays
ALLOC_FREE_TRY(weight_i, nullptr);
ALLOC_FREE_TRY(weight_s, nullptr);
ALLOC_FREE_TRY(work, nullptr);
}
}
|
c0779b1aceb327823aec52af213d91d1a172b781.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdbool.h>
#include <stdio.h>
#include "nn_interpolation_kernel.h"
const int bdx =16;
const int bdy = 16;
//const int bdz =4;
#define real float
__device__ void getRound(float x, int width, int& point)
{
float xcoord = (x + 1) * (width - 1) / 2;
point = round(xcoord);
}
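// Worked example of the mapping above: getRound converts normalized grid coordinates in
// [-1, 1] to pixel indices in [0, width-1]; for width = 5, x = -1 -> 0, x = 0 -> 2 and
// x = 1 -> 4. Out-of-range coordinates round to indices that are filtered out later by
// between().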
__device__ bool between(int value, int lowerBound, int upperBound)
{
return (value >= lowerBound && value <= upperBound);
}
__global__ void nearestNeighFromGrid_1D(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideChannels, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideWidth,
int inputImages_channels, int inputImages_width, int output_width)
{
const int wOut = blockIdx.x*blockDim.x+threadIdx.x;
const bool withinImageBounds = wOut < output_width; // assume the size of the input is the same as the output
const int b = blockIdx.y;
float xf=0;
if(withinImageBounds){
int grid_address = b*grids_strideBatch + wOut*grids_strideWidth; // here we use the address of the 0th channel
xf = grids_data[grid_address];
}
else
return;
int xInRound;
getRound(xf, inputImages_width, xInRound);
const int outAddress = output_strideBatch * b + output_strideWidth * wOut;
const int inRoundAddress = inputImages_strideBatch * b + inputImages_strideWidth * xInRound;
float v=0;
float inRound=0;
bool RoundIsIn = between(xInRound, 0, inputImages_width-1);
// interpolation happens here
for(int t=0; t<inputImages_channels; t++)
{
if(RoundIsIn) inRound = inputImages_data[inRoundAddress + t*inputImages_strideChannels];
v = inRound;
output_data[outAddress + t*output_strideChannels] = v;
}
}
__global__ void nearestNeighFromGrid_2D(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int output_height, int output_width)
{
const int wOut = blockIdx.y*blockDim.y+threadIdx.y;
const int hOut = blockIdx.x*blockDim.x+threadIdx.x;
const bool withinImageBounds = wOut < output_width && hOut < output_height; // assume the size of the input is the same as the output
const int b = blockIdx.z;
float yf=0;
float xf=0;
if(withinImageBounds){
int grid_address =b*grids_strideBatch + hOut*grids_strideHeight + wOut*grids_strideWidth; // here we use the address of the 0th channel
xf = grids_data[grid_address];
yf = grids_data[grid_address + grids_strideYX]; // address of the 1st channel
}
else
return;
int yInRound, xInRound;
getRound(xf, inputImages_width, xInRound);
getRound(yf, inputImages_height, yInRound);
const int outAddress = output_strideBatch * b + output_strideHeight * hOut + output_strideWidth * wOut;
const int inRoundAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInRound + inputImages_strideWidth * xInRound;
float v=0;
float inRound=0;
bool RoundIsIn = between(xInRound, 0, inputImages_width-1) && between(yInRound, 0, inputImages_height-1);
// interpolation happens here
for(int t=0; t<inputImages_channels; t++)
{
if(RoundIsIn) inRound = inputImages_data[inRoundAddress + t*inputImages_strideChannels];
v = inRound;
output_data[outAddress + t*output_strideChannels] = v;
}
}
__global__ void nearestNeighFromGrid_3D(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels,
int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideChannels,
int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels,
int output_strideDepth, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width,
int outputImages_depth, int outputImages_height, int outputImages_width)
{
const int wOut = blockIdx.z % (outputImages_width);
const int hOut = blockIdx.y*blockDim.y + threadIdx.y;
const int dOut = blockIdx.x*blockDim.x + threadIdx.x;
const bool withinImageBounds = dOut < outputImages_depth && hOut < outputImages_height; // assume the size of the input is the same as the output
const int batchIdx = blockIdx.z /(outputImages_width);
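// blockIdx.z packs both the output width index and the batch index
// (blockIdx.z = batchIdx * outputImages_width + wOut); the host launcher sizes the
// grid's z dimension as szw * szb accordingly.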
float zf=0;
float yf=0;
float xf=0;
if(withinImageBounds){
int grid_address = batchIdx*grids_strideBatch + dOut*grids_strideDepth + hOut*grids_strideHeight + wOut*grids_strideWidth; // here we use the address of the 0th channel
xf = grids_data[grid_address]; // changed zf yf xf to xf yf zf to adapt to the CPU version; assume the 3*1 vector in the grid is stored as width, height and depth
yf = grids_data[grid_address + grids_strideChannels];
zf = grids_data[grid_address + grids_strideChannels*2]; // address of the 2nd channel
}
else
return;
int zInRound, yInRound, xInRound; // zInTopFrontRound
getRound(xf, inputImages_width, xInRound);
getRound(yf, inputImages_height, yInRound);
getRound(zf, inputImages_depth, zInRound);
const int outAddress = output_strideBatch * batchIdx + output_strideDepth * dOut + output_strideHeight * hOut + output_strideWidth * wOut; // here assume the channel will be calculated later
const int inRoundAddress = inputImages_strideBatch * batchIdx + inputImages_strideDepth*zInRound+ inputImages_strideHeight * yInRound + inputImages_strideWidth * xInRound;
float v=0;
float inRound=0;
bool RoundIsIn = between(xInRound, 0, inputImages_width-1) && between(yInRound, 0, inputImages_height-1)&& between(zInRound, 0, inputImages_depth-1);
// interpolation happens here
for(int t=0; t<inputImages_channels; t++)
{
if(RoundIsIn) inRound = inputImages_data[inRoundAddress + t*inputImages_strideChannels];
v = inRound;
output_data[outAddress + t*output_strideChannels] = v;
}
}
#ifdef __cplusplus
extern "C" {
#endif
int nearestNeighBCW_updateOutput_cuda_kernel_1D(/*THCudaTensor_size(state,output,2)*/int szw,
/*THCudaTensor_size(state,output,1)*/int szc,
/*THCudaTensor_size(state,output,0)*/int sz3,
/*THCudaTensor_size(state, inputImages, 3)*/int ic,
/*THCudaTensor_size(state, inputImages, 1)*/int iw,
/*THCudaTensor_size(state, output, 2)*/int ow,
/*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int isw,
/*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsw,
/*THCudaTensor *output*/float *output, int osb, int osc, int osw,
/*THCState_getCurrentStream(state)*/hipStream_t stream)
{
// batch channel x y
// 0 1 2 3
//dim3 blocks((THCudaTensor_size(state,output,2)+15)/16, THCudaTensor_size(state,output,1), THCudaTensor_size(state,output,0));
dim3 blocks((ow+bdx-1)/bdx, sz3);
dim3 threads(bdx);
/* assume BHWD */
hipLaunchKernelGGL(( nearestNeighFromGrid_1D) , dim3(blocks), dim3(threads), 0, /*THCState_getCurrentStream(state)*/stream ,
/*THCudaTensor_data(state, inputImages)*/inputImages,
/*THCudaTensor_stride(state, inputImages, 0)*/isb,
/*THCudaTensor_stride(state, inputImages, 3)*/isc,
/*THCudaTensor_stride(state, inputImages, 2)*/isw,
/*THCudaTensor_data(state, grids)*/grids,
/*THCudaTensor_stride(state, grids, 0)*/gsb,
/*THCudaTensor_stride(state, grids, 3)*/gsc,
/*THCudaTensor_stride(state, grids, 2)*/gsw,
/*THCudaTensor_data(state, output)*/output,
/*THCudaTensor_stride(state, output, 0)*/osb,
/*THCudaTensor_stride(state, output, 3)*/osc,
/*THCudaTensor_stride(state, output, 2)*/osw,
/*THCudaTensor_size(state, inputImages, 3)*/ic,
/*THCudaTensor_size(state, inputImages, 2)*/iw,
/*THCudaTensor_size(state, output, 2)*/ow);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nearestNeigh.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
int nearestNeighBCWH_updateOutput_cuda_kernel_2D(/*THCudaTensor_size(state,output,2)*/int szw,
/*THCudaTensor_size(state,output,1)*/int szc,
/*THCudaTensor_size(state,output,0)*/int sz3,
/*THCudaTensor_size(state, inputImages, 3)*/int ic,
/*THCudaTensor_size(state, inputImages, 1)*/int iw,
/*THCudaTensor_size(state, inputImages, 2)*/int ih,
/*THCudaTensor_size(state, output, 2)*/int ow,
/*THCudaTensor_size(state, output, 2)*/int oh,
/*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int isw, int ish,
/*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsw, int gsh,
/*THCudaTensor *output*/float *output, int osb, int osc, int osw, int osh,
/*THCState_getCurrentStream(state)*/hipStream_t stream)
{
// batch channel x y
// 0 1 2 3
//dim3 blocks((THCudaTensor_size(state,output,2)+15)/16, THCudaTensor_size(state,output,1), THCudaTensor_size(state,output,0));
dim3 blocks((oh+bdx-1)/bdx, (ow+bdy-1)/bdy, sz3);
dim3 threads(bdx,bdy);
//printf(" iw, ih, ow, oh %d %d %d %d",iw,ih,ow,oh);
/* assume BHWD */
hipLaunchKernelGGL(( nearestNeighFromGrid_2D) , dim3(blocks), dim3(threads), 0, /*THCState_getCurrentStream(state)*/stream ,
/*THCudaTensor_data(state, inputImages)*/inputImages,
/*THCudaTensor_stride(state, inputImages, 0)*/isb,
/*THCudaTensor_stride(state, inputImages, 3)*/isc,
/*THCudaTensor_stride(state, inputImages, 1)*/ish,
/*THCudaTensor_stride(state, inputImages, 2)*/isw,
/*THCudaTensor_data(state, grids)*/grids,
/*THCudaTensor_stride(state, grids, 0)*/gsb,
/*THCudaTensor_stride(state, grids, 3)*/gsc,
/*THCudaTensor_stride(state, grids, 1)*/gsh,
/*THCudaTensor_stride(state, grids, 2)*/gsw,
/*THCudaTensor_data(state, output)*/output,
/*THCudaTensor_stride(state, output, 0)*/osb,
/*THCudaTensor_stride(state, output, 3)*/osc,
/*THCudaTensor_stride(state, output, 1)*/osh,
/*THCudaTensor_stride(state, output, 2)*/osw,
/*THCudaTensor_size(state, inputImages, 3)*/ic,
/*THCudaTensor_size(state, inputImages, 1)*/ih,
/*THCudaTensor_size(state, inputImages, 2)*/iw,
/*THCudaTensor_size(state, output, 2)*/oh, ow);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nearestNeigh.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
int nearestNeighBCWHD_updateOutput_cuda_kernel_3D(/*THCudaTensor_size(state,output,2)*/int szw,
/*THCudaTensor_size(state,output,1)*/int szc,
/*THCudaTensor_size(state,output,0)*/int szb,
/*THCudaTensor_size(state, inputImages, 1)*/int ic,
/*THCudaTensor_size(state, inputImages, 2)*/int iw,
/*THCudaTensor_size(state, inputImages, 3)*/int ih,
/*THCudaTensor_size(state, inputImages, 4)*/int id,
/*THCudaTensor_size(state, inputImages, 2)*/int ow,
/*THCudaTensor_size(state, inputImages, 3)*/int oh,
/*THCudaTensor_size(state, inputImages, 4)*/int od,
/*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int isw, int ish, int isd,
/*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsw, int gsh, int gsd,
/*THCudaTensor *output*/float *output, int osb, int osc, int osw, int osh, int osd,
/*THCState_getCurrentStream(state)*/hipStream_t stream)
{
// batch channel x y z
// 0 1 2 3 4
//dim3 blocks((THCudaTensor_size(state,output,2)+15)/16, THCudaTensor_size(state,output,1), THCudaTensor_size(state,output,0));
dim3 blocks((od+bdx-1)/bdx, (oh+bdy-1)/bdy, szw*szb);
dim3 threads(bdx,bdy);
//printf(" gsh %d gsc %d osh %d osc %d ic %d id %d ih %d\n!!!!",gsh,gsc,osh,osc,ic,id,ih);
/* assume BHWD */
// (float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels,
// int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
// float* grids_data, int grids_strideBatch, int grids_strideChannels,
// int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
// float* output_data, int output_strideBatch, int output_strideChannels,
// int output_strideDepth, int output_strideHeight, int output_strideWidth,
// int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width)
hipLaunchKernelGGL(( nearestNeighFromGrid_3D) , dim3(blocks), dim3(threads), 0, /*THCState_getCurrentStream(state)*/stream ,
inputImages, isb, isc, isd, ish, isw,
grids, gsb, gsc, gsd, gsh, gsw,
output, osb, osc, osd, osh, osw,
ic, id, ih, iw, od, oh, ow);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nearestNeigh.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
#ifdef __cplusplus
}
#endif
|
c0779b1aceb327823aec52af213d91d1a172b781.cu
|
#include <stdbool.h>
#include <stdio.h>
#include "nn_interpolation_kernel.h"
const int bdx =16;
const int bdy = 16;
//const int bdz =4;
#define real float
__device__ void getRound(float x, int width, int& point)
{
float xcoord = (x + 1) * (width - 1) / 2;
point = round(xcoord);
}
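// Worked example of the mapping above: getRound converts normalized grid coordinates in
// [-1, 1] to pixel indices in [0, width-1]; for width = 5, x = -1 -> 0, x = 0 -> 2 and
// x = 1 -> 4. Out-of-range coordinates round to indices that are filtered out later by
// between().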
__device__ bool between(int value, int lowerBound, int upperBound)
{
return (value >= lowerBound && value <= upperBound);
}
__global__ void nearestNeighFromGrid_1D(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideChannels, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideWidth,
int inputImages_channels, int inputImages_width, int output_width)
{
const int wOut = blockIdx.x*blockDim.x+threadIdx.x;
const bool withinImageBounds = wOut < output_width; // assume the size of the input is the same as the output
const int b = blockIdx.y;
float xf=0;
if(withinImageBounds){
int grid_address = b*grids_strideBatch + wOut*grids_strideWidth; // here we use the address of the 0th channel
xf = grids_data[grid_address];
}
else
return;
int xInRound;
getRound(xf, inputImages_width, xInRound);
const int outAddress = output_strideBatch * b + output_strideWidth * wOut;
const int inRoundAddress = inputImages_strideBatch * b + inputImages_strideWidth * xInRound;
float v=0;
float inRound=0;
bool RoundIsIn = between(xInRound, 0, inputImages_width-1);
// interpolation happens here
for(int t=0; t<inputImages_channels; t++)
{
if(RoundIsIn) inRound = inputImages_data[inRoundAddress + t*inputImages_strideChannels];
v = inRound;
output_data[outAddress + t*output_strideChannels] = v;
}
}
__global__ void nearestNeighFromGrid_2D(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int output_height, int output_width)
{
const int wOut = blockIdx.y*blockDim.y+threadIdx.y;
const int hOut = blockIdx.x*blockDim.x+threadIdx.x;
const bool withinImageBounds = wOut < output_width && hOut < output_height; // assume the size of the input is the same as the output
const int b = blockIdx.z;
float yf=0;
float xf=0;
if(withinImageBounds){
int grid_address =b*grids_strideBatch + hOut*grids_strideHeight + wOut*grids_strideWidth; // here we use the address of the 0th channel
xf = grids_data[grid_address];
yf = grids_data[grid_address + grids_strideYX]; // address of the 1st channel
}
else
return;
int yInRound, xInRound;
getRound(xf, inputImages_width, xInRound);
getRound(yf, inputImages_height, yInRound);
const int outAddress = output_strideBatch * b + output_strideHeight * hOut + output_strideWidth * wOut;
const int inRoundAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInRound + inputImages_strideWidth * xInRound;
float v=0;
float inRound=0;
bool RoundIsIn = between(xInRound, 0, inputImages_width-1) && between(yInRound, 0, inputImages_height-1);
// interpolation happens here
for(int t=0; t<inputImages_channels; t++)
{
if(RoundIsIn) inRound = inputImages_data[inRoundAddress + t*inputImages_strideChannels];
v = inRound;
output_data[outAddress + t*output_strideChannels] = v;
}
}
__global__ void nearestNeighFromGrid_3D(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels,
int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideChannels,
int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels,
int output_strideDepth, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width,
int outputImages_depth, int outputImages_height, int outputImages_width)
{
const int wOut = blockIdx.z % (outputImages_width);
const int hOut = blockIdx.y*blockDim.y + threadIdx.y;
const int dOut = blockIdx.x*blockDim.x + threadIdx.x;
const bool withinImageBounds = dOut < outputImages_depth && hOut < outputImages_height; // assume the size of the input is the same as the output
const int batchIdx = blockIdx.z /(outputImages_width);
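// blockIdx.z packs both the output width index and the batch index
// (blockIdx.z = batchIdx * outputImages_width + wOut); the host launcher sizes the
// grid's z dimension as szw * szb accordingly.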
float zf=0;
float yf=0;
float xf=0;
if(withinImageBounds){
int grid_address = batchIdx*grids_strideBatch + dOut*grids_strideDepth + hOut*grids_strideHeight + wOut*grids_strideWidth; // here we use the address of the 0th channel
xf = grids_data[grid_address]; // changed zf yf xf to xf yf zf to adapt to the CPU version; assume the 3*1 vector in the grid is stored as width, height and depth
yf = grids_data[grid_address + grids_strideChannels];
zf = grids_data[grid_address + grids_strideChannels*2]; // address of the 2nd channel
}
else
return;
int zInRound, yInRound, xInRound; // zInTopFrontRound
getRound(xf, inputImages_width, xInRound);
getRound(yf, inputImages_height, yInRound);
getRound(zf, inputImages_depth, zInRound);
const int outAddress = output_strideBatch * batchIdx + output_strideDepth * dOut + output_strideHeight * hOut + output_strideWidth * wOut; // here assume the channel will be calculated later
const int inRoundAddress = inputImages_strideBatch * batchIdx + inputImages_strideDepth*zInRound+ inputImages_strideHeight * yInRound + inputImages_strideWidth * xInRound;
float v=0;
float inRound=0;
bool RoundIsIn = between(xInRound, 0, inputImages_width-1) && between(yInRound, 0, inputImages_height-1)&& between(zInRound, 0, inputImages_depth-1);
// interpolation happens here
for(int t=0; t<inputImages_channels; t++)
{
if(RoundIsIn) inRound = inputImages_data[inRoundAddress + t*inputImages_strideChannels];
v = inRound;
output_data[outAddress + t*output_strideChannels] = v;
}
}
#ifdef __cplusplus
extern "C" {
#endif
int nearestNeighBCW_updateOutput_cuda_kernel_1D(/*THCudaTensor_size(state,output,2)*/int szw,
/*THCudaTensor_size(state,output,1)*/int szc,
/*THCudaTensor_size(state,output,0)*/int sz3,
/*THCudaTensor_size(state, inputImages, 3)*/int ic,
/*THCudaTensor_size(state, inputImages, 1)*/int iw,
/*THCudaTensor_size(state, output, 2)*/int ow,
/*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int isw,
/*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsw,
/*THCudaTensor *output*/float *output, int osb, int osc, int osw,
/*THCState_getCurrentStream(state)*/cudaStream_t stream)
{
// batch channel x y
// 0 1 2 3
//dim3 blocks((THCudaTensor_size(state,output,2)+15)/16, THCudaTensor_size(state,output,1), THCudaTensor_size(state,output,0));
dim3 blocks((ow+bdx-1)/bdx, sz3);
dim3 threads(bdx);
/* assume BHWD */
nearestNeighFromGrid_1D <<< blocks, threads, 0, /*THCState_getCurrentStream(state)*/stream >>> (
/*THCudaTensor_data(state, inputImages)*/inputImages,
/*THCudaTensor_stride(state, inputImages, 0)*/isb,
/*THCudaTensor_stride(state, inputImages, 3)*/isc,
/*THCudaTensor_stride(state, inputImages, 2)*/isw,
/*THCudaTensor_data(state, grids)*/grids,
/*THCudaTensor_stride(state, grids, 0)*/gsb,
/*THCudaTensor_stride(state, grids, 3)*/gsc,
/*THCudaTensor_stride(state, grids, 2)*/gsw,
/*THCudaTensor_data(state, output)*/output,
/*THCudaTensor_stride(state, output, 0)*/osb,
/*THCudaTensor_stride(state, output, 3)*/osc,
/*THCudaTensor_stride(state, output, 2)*/osw,
/*THCudaTensor_size(state, inputImages, 3)*/ic,
/*THCudaTensor_size(state, inputImages, 2)*/iw,
/*THCudaTensor_size(state, output, 2)*/ow);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nearestNeigh.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
int nearestNeighBCWH_updateOutput_cuda_kernel_2D(/*THCudaTensor_size(state,output,2)*/int szw,
/*THCudaTensor_size(state,output,1)*/int szc,
/*THCudaTensor_size(state,output,0)*/int sz3,
/*THCudaTensor_size(state, inputImages, 3)*/int ic,
/*THCudaTensor_size(state, inputImages, 1)*/int iw,
/*THCudaTensor_size(state, inputImages, 2)*/int ih,
/*THCudaTensor_size(state, output, 2)*/int ow,
/*THCudaTensor_size(state, output, 2)*/int oh,
/*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int isw, int ish,
/*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsw, int gsh,
/*THCudaTensor *output*/float *output, int osb, int osc, int osw, int osh,
/*THCState_getCurrentStream(state)*/cudaStream_t stream)
{
// batch channel x y
// 0 1 2 3
//dim3 blocks((THCudaTensor_size(state,output,2)+15)/16, THCudaTensor_size(state,output,1), THCudaTensor_size(state,output,0));
dim3 blocks((oh+bdx-1)/bdx, (ow+bdy-1)/bdy, sz3);
dim3 threads(bdx,bdy);
//printf(" iw, ih, ow, oh %d %d %d %d",iw,ih,ow,oh);
/* assume BHWD */
nearestNeighFromGrid_2D <<< blocks, threads, 0, /*THCState_getCurrentStream(state)*/stream >>> (
/*THCudaTensor_data(state, inputImages)*/inputImages,
/*THCudaTensor_stride(state, inputImages, 0)*/isb,
/*THCudaTensor_stride(state, inputImages, 3)*/isc,
/*THCudaTensor_stride(state, inputImages, 1)*/ish,
/*THCudaTensor_stride(state, inputImages, 2)*/isw,
/*THCudaTensor_data(state, grids)*/grids,
/*THCudaTensor_stride(state, grids, 0)*/gsb,
/*THCudaTensor_stride(state, grids, 3)*/gsc,
/*THCudaTensor_stride(state, grids, 1)*/gsh,
/*THCudaTensor_stride(state, grids, 2)*/gsw,
/*THCudaTensor_data(state, output)*/output,
/*THCudaTensor_stride(state, output, 0)*/osb,
/*THCudaTensor_stride(state, output, 3)*/osc,
/*THCudaTensor_stride(state, output, 1)*/osh,
/*THCudaTensor_stride(state, output, 2)*/osw,
/*THCudaTensor_size(state, inputImages, 3)*/ic,
/*THCudaTensor_size(state, inputImages, 1)*/ih,
/*THCudaTensor_size(state, inputImages, 2)*/iw,
/*THCudaTensor_size(state, output, 2)*/oh, ow);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nearestNeigh.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
int nearestNeighBCWHD_updateOutput_cuda_kernel_3D(/*THCudaTensor_size(state,output,2)*/int szw,
/*THCudaTensor_size(state,output,1)*/int szc,
/*THCudaTensor_size(state,output,0)*/int szb,
/*THCudaTensor_size(state, inputImages, 1)*/int ic,
/*THCudaTensor_size(state, inputImages, 2)*/int iw,
/*THCudaTensor_size(state, inputImages, 3)*/int ih,
/*THCudaTensor_size(state, inputImages, 4)*/int id,
/*THCudaTensor_size(state, inputImages, 2)*/int ow,
/*THCudaTensor_size(state, inputImages, 3)*/int oh,
/*THCudaTensor_size(state, inputImages, 4)*/int od,
/*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int isw, int ish, int isd,
/*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsw, int gsh, int gsd,
/*THCudaTensor *output*/float *output, int osb, int osc, int osw, int osh, int osd,
/*THCState_getCurrentStream(state)*/cudaStream_t stream)
{
// batch channel x y z
// 0 1 2 3 4
//dim3 blocks((THCudaTensor_size(state,output,2)+15)/16, THCudaTensor_size(state,output,1), THCudaTensor_size(state,output,0));
dim3 blocks((od+bdx-1)/bdx, (oh+bdy-1)/bdy, szw*szb);
dim3 threads(bdx,bdy);
//printf(" gsh %d gsc %d osh %d osc %d ic %d id %d ih %d\n!!!!",gsh,gsc,osh,osc,ic,id,ih);
/* assume BHWD */
// (float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels,
// int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
// float* grids_data, int grids_strideBatch, int grids_strideChannels,
// int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
// float* output_data, int output_strideBatch, int output_strideChannels,
// int output_strideDepth, int output_strideHeight, int output_strideWidth,
// int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width)
nearestNeighFromGrid_3D <<< blocks, threads, 0, /*THCState_getCurrentStream(state)*/stream >>> (
inputImages, isb, isc, isd, ish, isw,
grids, gsb, gsc, gsd, gsh, gsw,
output, osb, osc, osd, osh, osw,
ic, id, ih, iw, od, oh, ow);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nearestNeigh.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
#ifdef __cplusplus
}
#endif
|
32d4f9f1ec2ae3a4c465593c7de97b25bb89c899.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: ndtrung
#include "QuaternionMath.h"
#include "TwoStepNVTRigidGPU.cuh"
#include <assert.h>
/*! \file TwoStepNVTRigidGPU.cu
\brief Defines GPU kernel code for NVT integration on the GPU. Used by TwoStepNVTRigidGPU.
*/
// Flag for invalid particle index, identical to the sentinel value NO_INDEX in RigidData.h
#define INVALID_INDEX 0xffffffff
/*! Fifth-order Taylor expansion of exp(x) about x = 0
\param x Point at which to evaluate the expansion
*/
__device__ Scalar taylor_exp(Scalar x)
{
Scalar x2, x3, x4, x5;
x2 = x * x;
x3 = x2 * x;
x4 = x2 * x2;
x5 = x4 * x;
return (Scalar(1.0) + x + x2 / Scalar(2.0) + x3 / Scalar(6.0) + x4 / Scalar(24.0) + x5 / Scalar(120.0));
}
#pragma mark RIGID_STEP_ONE_KERNEL
/*! Takes the first half-step forward for rigid bodies in the velocity-verlet NVT integration
\param rdata_com Body center of mass
\param rdata_vel Body velocity
\param rdata_angmom Angular momentum
\param rdata_angvel Angular velocity
\param rdata_orientation Quaternion
\param rdata_body_image Body image
\param rdata_conjqm Conjugate quaternion momentum
\param d_rigid_mass Body mass
\param d_rigid_mi Body inertia moments
\param d_rigid_force Body forces
\param d_rigid_torque Body torques
\param d_rigid_group Body indices
\param n_group_bodies Number of rigid bodies in my group
\param n_bodies Total number of rigid bodies
\param nvt_rdata_scale_t Translational thermostat rescaling factor
\param nvt_rdata_scale_r Rotational thermostat rescaling factor
\param nvt_rdata_partial_Ksum_t Body translational kinetic energy
\param nvt_rdata_partial_Ksum_r Body rotation kinetic energy
\param deltaT Timestep
\param box Box dimensions for periodic boundary condition handling
*/
extern "C" __global__ void gpu_nvt_rigid_step_one_body_kernel(Scalar4* rdata_com,
Scalar4* rdata_vel,
Scalar4* rdata_angmom,
Scalar4* rdata_angvel,
Scalar4* rdata_orientation,
int3* rdata_body_image,
Scalar4* rdata_conjqm,
Scalar *d_rigid_mass,
Scalar4 *d_rigid_mi,
Scalar4 *d_rigid_force,
Scalar4 *d_rigid_torque,
unsigned int *d_rigid_group,
unsigned int n_group_bodies,
unsigned int n_bodies,
Scalar4 nvt_rdata_scale_t,
Scalar nvt_rdata_scale_r,
Scalar* nvt_rdata_partial_Ksum_t,
Scalar* nvt_rdata_partial_Ksum_r,
BoxDim box,
Scalar deltaT)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// do velocity verlet update
// v(t+deltaT/2) = v(t) + (1/2)a*deltaT
// r(t+deltaT) = r(t) + v(t+deltaT/2)*deltaT
if (group_idx >= n_group_bodies)
return;
Scalar body_mass;
Scalar4 moment_inertia, com, vel, orientation, ex_space, ey_space, ez_space, force, torque, conjqm;
int3 body_image;
Scalar4 mbody, tbody, fquat;
Scalar tmp, akin_t, akin_r;
Scalar dt_half = Scalar(0.5) * deltaT;
unsigned int idx_body = d_rigid_group[group_idx];
body_mass = d_rigid_mass[idx_body];
moment_inertia = d_rigid_mi[idx_body];
com = rdata_com[idx_body];
vel = rdata_vel[idx_body];
orientation = rdata_orientation[idx_body];
body_image = rdata_body_image[idx_body];
force = d_rigid_force[idx_body];
torque = d_rigid_torque[idx_body];
conjqm = rdata_conjqm[idx_body];
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
// update velocity
Scalar dtfm = dt_half / body_mass;
Scalar4 vel2;
vel2.x = vel.x + dtfm * force.x;
vel2.y = vel.y + dtfm * force.y;
vel2.z = vel.z + dtfm * force.z;
vel2.x *= nvt_rdata_scale_t.x;
vel2.y *= nvt_rdata_scale_t.y;
vel2.z *= nvt_rdata_scale_t.z;
vel2.w = vel.w;
tmp = vel2.x * vel2.x + vel2.y * vel2.y + vel2.z * vel2.z;
akin_t = body_mass * tmp;
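// akin_t holds this body's translational kinetic-energy contribution (m * |v|^2,
// without the 1/2 factor); the per-body values written to nvt_rdata_partial_Ksum_t
// below are reduced by gpu_nvt_rigid_reduce_ksum_kernel.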
// update position
Scalar3 pos2;
pos2.x = com.x + vel2.x * deltaT;
pos2.y = com.y + vel2.y * deltaT;
pos2.z = com.z + vel2.z * deltaT;
// time to fix the periodic boundary conditions
box.wrap(pos2, body_image);
matrix_dot(ex_space, ey_space, ez_space, torque, tbody);
quatvec(orientation, tbody, fquat);
Scalar4 conjqm2;
conjqm2.x = conjqm.x + deltaT * fquat.x;
conjqm2.y = conjqm.y + deltaT * fquat.y;
conjqm2.z = conjqm.z + deltaT * fquat.z;
conjqm2.w = conjqm.w + deltaT * fquat.w;
conjqm2.x *= nvt_rdata_scale_r;
conjqm2.y *= nvt_rdata_scale_r;
conjqm2.z *= nvt_rdata_scale_r;
conjqm2.w *= nvt_rdata_scale_r;
// step 1.4 to 1.13 - use no_squish rotate to update p and q
no_squish_rotate(3, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(2, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(1, conjqm2, orientation, moment_inertia, deltaT);
no_squish_rotate(2, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(3, conjqm2, orientation, moment_inertia, dt_half);
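// Note: the symmetric 3-2-1-2-3 ordering above (half steps about principal axes 3 and 2,
// a full step about axis 1, then the mirrored half steps) is the NO_SQUISH splitting of
// the free rigid-body rotation over one timestep deltaT.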
// update the exyz_space
// transform p back to angmom
// update angular velocity
Scalar4 angmom2;
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
invquatvec(orientation, conjqm2, mbody);
transpose_dot(ex_space, ey_space, ez_space, mbody, angmom2);
angmom2.x *= Scalar(0.5);
angmom2.y *= Scalar(0.5);
angmom2.z *= Scalar(0.5);
Scalar4 angvel2;
computeAngularVelocity(angmom2, moment_inertia, ex_space, ey_space, ez_space, angvel2);
akin_r = angmom2.x * angvel2.x + angmom2.y * angvel2.y + angmom2.z * angvel2.z;
// write out the results (MEM_TRANSFER: ? bytes)
rdata_com[idx_body] = make_scalar4(pos2.x, pos2.y, pos2.z, com.w);
rdata_vel[idx_body] = vel2;
rdata_angmom[idx_body] = angmom2;
rdata_angvel[idx_body] = angvel2;
rdata_orientation[idx_body] = orientation;
rdata_body_image[idx_body] = body_image;
rdata_conjqm[idx_body] = conjqm2;
nvt_rdata_partial_Ksum_t[group_idx] = akin_t;
nvt_rdata_partial_Ksum_r[group_idx] = akin_r;
}
/*! \param rigid_data Rigid body data to step forward 1/2 step
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Particle net forces
\param box Box dimensions for periodic boundary condition handling
\param nvt_rdata Thermostat data
\param deltaT Amount of real time to step forward in one time step
*/
hipError_t gpu_nvt_rigid_step_one(const gpu_rigid_data_arrays& rigid_data,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
const BoxDim& box,
const gpu_nvt_rigid_data& nvt_rdata,
Scalar deltaT)
{
assert(d_net_force);
unsigned int n_bodies = rigid_data.n_bodies;
unsigned int n_group_bodies = rigid_data.n_group_bodies;
// setup the grid to run the kernel for rigid bodies
int block_size = 64;
int n_blocks = n_group_bodies / block_size + 1;
dim3 body_grid(n_blocks, 1, 1);
dim3 body_threads(block_size, 1, 1);
hipLaunchKernelGGL(( gpu_nvt_rigid_step_one_body_kernel), dim3(body_grid), dim3(body_threads) , 0, 0, rigid_data.com,
rigid_data.vel,
rigid_data.angmom,
rigid_data.angvel,
rigid_data.orientation,
rigid_data.body_image,
rigid_data.conjqm,
rigid_data.body_mass,
rigid_data.moment_inertia,
rigid_data.force,
rigid_data.torque,
rigid_data.body_indices,
n_group_bodies,
n_bodies,
nvt_rdata.scale_t,
nvt_rdata.scale_r,
nvt_rdata.partial_Ksum_t,
nvt_rdata.partial_Ksum_r,
box,
deltaT);
return hipSuccess;
}
#pragma mark RIGID_STEP_TWO_KERNEL
//! Takes the 2nd 1/2 step forward in the velocity-verlet NVT integration scheme
/*!
\param rdata_vel Body velocity
\param rdata_angmom Angular momentum
\param rdata_angvel Angular velocity
\param rdata_orientation Quaternion
\param rdata_conjqm Conjugate quaternion momentum
\param d_rigid_mass Body mass
\param d_rigid_mi Body inertia moments
\param d_rigid_force Body forces
\param d_rigid_torque Body torques
\param d_rigid_group Body indices
\param n_group_bodies Number of rigid bodies in my group
\param n_bodies Total number of rigid bodies
\param nvt_rdata_scale_t Translational thermostat rescaling factor
\param nvt_rdata_scale_r Rotational thermostat rescaling factor
\param deltaT Timestep
\param box Box dimensions for periodic boundary condition handling
*/
extern "C" __global__ void gpu_nvt_rigid_step_two_body_kernel(Scalar4* rdata_vel,
Scalar4* rdata_angmom,
Scalar4* rdata_angvel,
Scalar4* rdata_orientation,
Scalar4* rdata_conjqm,
Scalar *d_rigid_mass,
Scalar4 *d_rigid_mi,
Scalar4 *d_rigid_force,
Scalar4 *d_rigid_torque,
unsigned int *d_rigid_group,
unsigned int n_group_bodies,
unsigned int n_bodies,
Scalar4 nvt_rdata_scale_t,
Scalar nvt_rdata_scale_r,
BoxDim box,
Scalar deltaT)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= n_group_bodies)
return;
Scalar body_mass;
Scalar4 moment_inertia, vel, ex_space, ey_space, ez_space, orientation, conjqm;
Scalar4 force, torque;
Scalar4 mbody, tbody, fquat;
Scalar dt_half = Scalar(0.5) * deltaT;
unsigned int idx_body = d_rigid_group[group_idx];
// Update body velocity and angmom
body_mass = d_rigid_mass[idx_body];
moment_inertia = d_rigid_mi[idx_body];
vel = rdata_vel[idx_body];
force = d_rigid_force[idx_body];
torque = d_rigid_torque[idx_body];
orientation = rdata_orientation[idx_body];
conjqm = rdata_conjqm[idx_body];
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
Scalar dtfm = dt_half / body_mass;
// update the velocity
Scalar4 vel2;
vel2.x = nvt_rdata_scale_t.x * vel.x + dtfm * force.x;
vel2.y = nvt_rdata_scale_t.y * vel.y + dtfm * force.y;
vel2.z = nvt_rdata_scale_t.z * vel.z + dtfm * force.z;
vel2.w = Scalar(0.0);
// update angular momentum
matrix_dot(ex_space, ey_space, ez_space, torque, tbody);
quatvec(orientation, tbody, fquat);
Scalar4 conjqm2, angmom2;
conjqm2.x = nvt_rdata_scale_r * conjqm.x + deltaT * fquat.x;
conjqm2.y = nvt_rdata_scale_r * conjqm.y + deltaT * fquat.y;
conjqm2.z = nvt_rdata_scale_r * conjqm.z + deltaT * fquat.z;
conjqm2.w = nvt_rdata_scale_r * conjqm.w + deltaT * fquat.w;
invquatvec(orientation, conjqm2, mbody);
transpose_dot(ex_space, ey_space, ez_space, mbody, angmom2);
angmom2.x *= Scalar(0.5);
angmom2.y *= Scalar(0.5);
angmom2.z *= Scalar(0.5);
angmom2.w = Scalar(0.0);
// update angular velocity
Scalar4 angvel2;
computeAngularVelocity(angmom2, moment_inertia, ex_space, ey_space, ez_space, angvel2);
// write out results
rdata_vel[idx_body] = vel2;
rdata_angmom[idx_body] = angmom2;
rdata_angvel[idx_body] = angvel2;
rdata_conjqm[idx_body] = conjqm2;
}
/*! \param rigid_data Rigid body data to step forward 1/2 step
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Particle net forces
\param d_net_virial Particle net virial
\param box Box dimensions for periodic boundary condition handling
\param nvt_rdata Thermostat data
\param deltaT Amount of real time to step forward in one time step
*/
hipError_t gpu_nvt_rigid_step_two( const gpu_rigid_data_arrays& rigid_data,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const BoxDim& box,
const gpu_nvt_rigid_data& nvt_rdata,
Scalar deltaT)
{
unsigned int n_bodies = rigid_data.n_bodies;
unsigned int n_group_bodies = rigid_data.n_group_bodies;
unsigned int block_size = 64;
unsigned int n_blocks = n_group_bodies / block_size + 1;
dim3 body_grid(n_blocks, 1, 1);
dim3 body_threads(block_size, 1, 1);
hipLaunchKernelGGL(( gpu_nvt_rigid_step_two_body_kernel), dim3(body_grid), dim3(body_threads) , 0, 0, rigid_data.vel,
rigid_data.angmom,
rigid_data.angvel,
rigid_data.orientation,
rigid_data.conjqm,
rigid_data.body_mass,
rigid_data.moment_inertia,
rigid_data.force,
rigid_data.torque,
rigid_data.body_indices,
n_group_bodies,
n_bodies,
nvt_rdata.scale_t,
nvt_rdata.scale_r,
box,
deltaT);
return hipSuccess;
}
#pragma mark RIGID_KINETIC_ENERGY_REDUCTION
//! Shared memory for kinetic energy reduction
extern __shared__ Scalar nvt_rigid_sdata[];
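// Dynamically sized: the launcher below allocates 2 * blockDim.x Scalars, the first half
// for the translational partial sums (body_ke_t) and the second half for the rotational
// partial sums (body_ke_r).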
/*! Summing the kinetic energy of rigid bodies
\param nvt_rdata Thermostat data for rigid bodies
*/
extern "C" __global__ void gpu_nvt_rigid_reduce_ksum_kernel(gpu_nvt_rigid_data nvt_rdata)
{
int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar* body_ke_t = nvt_rigid_sdata;
Scalar* body_ke_r = &nvt_rigid_sdata[blockDim.x];
Scalar Ksum_t = Scalar(0.0), Ksum_r=Scalar(0.0);
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < nvt_rdata.n_bodies; start += blockDim.x)
{
if (start + threadIdx.x < nvt_rdata.n_bodies)
{
body_ke_t[threadIdx.x] = nvt_rdata.partial_Ksum_t[start + threadIdx.x];
body_ke_r[threadIdx.x] = nvt_rdata.partial_Ksum_r[start + threadIdx.x];
}
else
{
body_ke_t[threadIdx.x] = Scalar(0.0);
body_ke_r[threadIdx.x] = Scalar(0.0);
}
__syncthreads();
// reduce the sum within a block
int offset = blockDim.x >> 1;
while (offset > 0)
{
if (threadIdx.x < offset)
{
body_ke_t[threadIdx.x] += body_ke_t[threadIdx.x + offset];
body_ke_r[threadIdx.x] += body_ke_r[threadIdx.x + offset];
}
offset >>= 1;
__syncthreads();
}
// everybody sums up Ksum
Ksum_t += body_ke_t[0];
Ksum_r += body_ke_r[0];
}
__syncthreads();
if (global_idx == 0)
{
*nvt_rdata.Ksum_t = Ksum_t;
*nvt_rdata.Ksum_r = Ksum_r;
}
}
/*!
\param nvt_rdata Thermostat data for rigid bodies
*/
hipError_t gpu_nvt_rigid_reduce_ksum(const gpu_nvt_rigid_data& nvt_rdata)
{
// setup the grid to run the kernel
int block_size = 128;
dim3 grid( 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel: double the block size to accommodate Ksum_t and Ksum_r
hipLaunchKernelGGL(( gpu_nvt_rigid_reduce_ksum_kernel), dim3(grid), dim3(threads), 2 * block_size * sizeof(Scalar) , 0, nvt_rdata);
return hipSuccess;
}
|
32d4f9f1ec2ae3a4c465593c7de97b25bb89c899.cu
|
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: ndtrung
#include "QuaternionMath.h"
#include "TwoStepNVTRigidGPU.cuh"
#include <assert.h>
/*! \file TwoStepNVTRigidGPU.cu
\brief Defines GPU kernel code for NVT integration on the GPU. Used by TwoStepNVTRigidGPU.
*/
// Flag for invalid particle index, identical to the sentinel value NO_INDEX in RigidData.h
#define INVALID_INDEX 0xffffffff
/*! Fifth-order Taylor (Maclaurin) expansion of exp(x)
\param x Point at which to evaluate the expansion
*/
__device__ Scalar taylor_exp(Scalar x)
{
Scalar x2, x3, x4, x5;
x2 = x * x;
x3 = x2 * x;
x4 = x2 * x2;
x5 = x4 * x;
return (Scalar(1.0) + x + x2 / Scalar(2.0) + x3 / Scalar(6.0) + x4 / Scalar(24.0) + x5 / Scalar(120.0));
}
#pragma mark RIGID_STEP_ONE_KERNEL
/*! Takes the first half-step forward for rigid bodies in the velocity-verlet NVT integration
\param rdata_com Body center of mass
\param rdata_vel Body velocity
\param rdata_angmom Angular momentum
\param rdata_angvel Angular velocity
\param rdata_orientation Quaternion
\param rdata_body_image Body image
\param rdata_conjqm Conjugate quaternion momentum
\param d_rigid_mass Body mass
\param d_rigid_mi Body inertia moments
\param d_rigid_force Body forces
\param d_rigid_torque Body torques
\param d_rigid_group Body indices
\param n_group_bodies Number of rigid bodies in my group
\param n_bodies Total number of rigid bodies
\param nvt_rdata_scale_t Thermostat rescaling factors applied to the translational velocity
\param nvt_rdata_scale_r Thermostat rescaling factor applied to the conjugate quaternion momentum
\param nvt_rdata_partial_Ksum_t Body translational kinetic energy
\param nvt_rdata_partial_Ksum_r Body rotation kinetic energy
\param deltaT Timestep
\param box Box dimensions for periodic boundary condition handling
*/
extern "C" __global__ void gpu_nvt_rigid_step_one_body_kernel(Scalar4* rdata_com,
Scalar4* rdata_vel,
Scalar4* rdata_angmom,
Scalar4* rdata_angvel,
Scalar4* rdata_orientation,
int3* rdata_body_image,
Scalar4* rdata_conjqm,
Scalar *d_rigid_mass,
Scalar4 *d_rigid_mi,
Scalar4 *d_rigid_force,
Scalar4 *d_rigid_torque,
unsigned int *d_rigid_group,
unsigned int n_group_bodies,
unsigned int n_bodies,
Scalar4 nvt_rdata_scale_t,
Scalar nvt_rdata_scale_r,
Scalar* nvt_rdata_partial_Ksum_t,
Scalar* nvt_rdata_partial_Ksum_r,
BoxDim box,
Scalar deltaT)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// do velocity verlet update
// v(t+deltaT/2) = v(t) + (1/2)a*deltaT
// r(t+deltaT) = r(t) + v(t+deltaT/2)*deltaT
if (group_idx >= n_group_bodies)
return;
Scalar body_mass;
Scalar4 moment_inertia, com, vel, orientation, ex_space, ey_space, ez_space, force, torque, conjqm;
int3 body_image;
Scalar4 mbody, tbody, fquat;
Scalar tmp, akin_t, akin_r;
Scalar dt_half = Scalar(0.5) * deltaT;
unsigned int idx_body = d_rigid_group[group_idx];
body_mass = d_rigid_mass[idx_body];
moment_inertia = d_rigid_mi[idx_body];
com = rdata_com[idx_body];
vel = rdata_vel[idx_body];
orientation = rdata_orientation[idx_body];
body_image = rdata_body_image[idx_body];
force = d_rigid_force[idx_body];
torque = d_rigid_torque[idx_body];
conjqm = rdata_conjqm[idx_body];
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
// update velocity
Scalar dtfm = dt_half / body_mass;
Scalar4 vel2;
vel2.x = vel.x + dtfm * force.x;
vel2.y = vel.y + dtfm * force.y;
vel2.z = vel.z + dtfm * force.z;
vel2.x *= nvt_rdata_scale_t.x;
vel2.y *= nvt_rdata_scale_t.y;
vel2.z *= nvt_rdata_scale_t.z;
vel2.w = vel.w;
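// m*v^2 = twice the translational kinetic energy of this body, accumulated into the thermostat partial sum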
tmp = vel2.x * vel2.x + vel2.y * vel2.y + vel2.z * vel2.z;
akin_t = body_mass * tmp;
// update position
Scalar3 pos2;
pos2.x = com.x + vel2.x * deltaT;
pos2.y = com.y + vel2.y * deltaT;
pos2.z = com.z + vel2.z * deltaT;
// time to fix the periodic boundary conditions
box.wrap(pos2, body_image);
matrix_dot(ex_space, ey_space, ez_space, torque, tbody);
quatvec(orientation, tbody, fquat);
Scalar4 conjqm2;
conjqm2.x = conjqm.x + deltaT * fquat.x;
conjqm2.y = conjqm.y + deltaT * fquat.y;
conjqm2.z = conjqm.z + deltaT * fquat.z;
conjqm2.w = conjqm.w + deltaT * fquat.w;
conjqm2.x *= nvt_rdata_scale_r;
conjqm2.y *= nvt_rdata_scale_r;
conjqm2.z *= nvt_rdata_scale_r;
conjqm2.w *= nvt_rdata_scale_r;
// step 1.4 to 1.13 - use no_squish rotate to update p and q
no_squish_rotate(3, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(2, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(1, conjqm2, orientation, moment_inertia, deltaT);
no_squish_rotate(2, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(3, conjqm2, orientation, moment_inertia, dt_half);
// update the exyz_space
// transform p back to angmom
// update angular velocity
Scalar4 angmom2;
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
invquatvec(orientation, conjqm2, mbody);
transpose_dot(ex_space, ey_space, ez_space, mbody, angmom2);
angmom2.x *= Scalar(0.5);
angmom2.y *= Scalar(0.5);
angmom2.z *= Scalar(0.5);
Scalar4 angvel2;
computeAngularVelocity(angmom2, moment_inertia, ex_space, ey_space, ez_space, angvel2);
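// angmom . angvel = twice the rotational kinetic energy of this body, accumulated into the thermostat partial sum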
akin_r = angmom2.x * angvel2.x + angmom2.y * angvel2.y + angmom2.z * angvel2.z;
// write out the results (MEM_TRANSFER: ? bytes)
rdata_com[idx_body] = make_scalar4(pos2.x, pos2.y, pos2.z, com.w);
rdata_vel[idx_body] = vel2;
rdata_angmom[idx_body] = angmom2;
rdata_angvel[idx_body] = angvel2;
rdata_orientation[idx_body] = orientation;
rdata_body_image[idx_body] = body_image;
rdata_conjqm[idx_body] = conjqm2;
nvt_rdata_partial_Ksum_t[group_idx] = akin_t;
nvt_rdata_partial_Ksum_r[group_idx] = akin_r;
}
/*! \param rigid_data Rigid body data to step forward 1/2 step
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Particle net forces
\param box Box dimensions for periodic boundary condition handling
\param nvt_rdata Thermostat data
\param deltaT Amount of real time to step forward in one time step
*/
cudaError_t gpu_nvt_rigid_step_one(const gpu_rigid_data_arrays& rigid_data,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
const BoxDim& box,
const gpu_nvt_rigid_data& nvt_rdata,
Scalar deltaT)
{
assert(d_net_force);
unsigned int n_bodies = rigid_data.n_bodies;
unsigned int n_group_bodies = rigid_data.n_group_bodies;
// setup the grid to run the kernel for rigid bodies
int block_size = 64;
int n_blocks = n_group_bodies / block_size + 1;
dim3 body_grid(n_blocks, 1, 1);
dim3 body_threads(block_size, 1, 1);
gpu_nvt_rigid_step_one_body_kernel<<< body_grid, body_threads >>>(rigid_data.com,
rigid_data.vel,
rigid_data.angmom,
rigid_data.angvel,
rigid_data.orientation,
rigid_data.body_image,
rigid_data.conjqm,
rigid_data.body_mass,
rigid_data.moment_inertia,
rigid_data.force,
rigid_data.torque,
rigid_data.body_indices,
n_group_bodies,
n_bodies,
nvt_rdata.scale_t,
nvt_rdata.scale_r,
nvt_rdata.partial_Ksum_t,
nvt_rdata.partial_Ksum_r,
box,
deltaT);
return cudaSuccess;
}
#pragma mark RIGID_STEP_TWO_KERNEL
//! Takes the 2nd 1/2 step forward in the velocity-verlet NVT integration scheme
/*!
\param rdata_vel Body velocity
\param rdata_angmom Angular momentum
\param rdata_angvel Angular velocity
\param rdata_orientation Quaternion
\param rdata_conjqm Conjugate quaternion momentum
\param d_rigid_mass Body mass
\param d_rigid_mi Body inertia moments
\param d_rigid_force Body forces
\param d_rigid_torque Body torques
\param d_rigid_group Body indices
\param n_group_bodies Number of rigid bodies in my group
\param n_bodies Total number of rigid bodies
\param nvt_rdata_scale_t Thermostat rescaling factors applied to the translational velocity
\param nvt_rdata_scale_r Thermostat rescaling factor applied to the conjugate quaternion momentum
\param deltaT Timestep
\param box Box dimensions for periodic boundary condition handling
*/
extern "C" __global__ void gpu_nvt_rigid_step_two_body_kernel(Scalar4* rdata_vel,
Scalar4* rdata_angmom,
Scalar4* rdata_angvel,
Scalar4* rdata_orientation,
Scalar4* rdata_conjqm,
Scalar *d_rigid_mass,
Scalar4 *d_rigid_mi,
Scalar4 *d_rigid_force,
Scalar4 *d_rigid_torque,
unsigned int *d_rigid_group,
unsigned int n_group_bodies,
unsigned int n_bodies,
Scalar4 nvt_rdata_scale_t,
Scalar nvt_rdata_scale_r,
BoxDim box,
Scalar deltaT)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= n_group_bodies)
return;
Scalar body_mass;
Scalar4 moment_inertia, vel, ex_space, ey_space, ez_space, orientation, conjqm;
Scalar4 force, torque;
Scalar4 mbody, tbody, fquat;
Scalar dt_half = Scalar(0.5) * deltaT;
unsigned int idx_body = d_rigid_group[group_idx];
// Update body velocity and angmom
body_mass = d_rigid_mass[idx_body];
moment_inertia = d_rigid_mi[idx_body];
vel = rdata_vel[idx_body];
force = d_rigid_force[idx_body];
torque = d_rigid_torque[idx_body];
orientation = rdata_orientation[idx_body];
conjqm = rdata_conjqm[idx_body];
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
Scalar dtfm = dt_half / body_mass;
// update the velocity
Scalar4 vel2;
vel2.x = nvt_rdata_scale_t.x * vel.x + dtfm * force.x;
vel2.y = nvt_rdata_scale_t.y * vel.y + dtfm * force.y;
vel2.z = nvt_rdata_scale_t.z * vel.z + dtfm * force.z;
vel2.w = Scalar(0.0);
// update angular momentum
matrix_dot(ex_space, ey_space, ez_space, torque, tbody);
quatvec(orientation, tbody, fquat);
Scalar4 conjqm2, angmom2;
conjqm2.x = nvt_rdata_scale_r * conjqm.x + deltaT * fquat.x;
conjqm2.y = nvt_rdata_scale_r * conjqm.y + deltaT * fquat.y;
conjqm2.z = nvt_rdata_scale_r * conjqm.z + deltaT * fquat.z;
conjqm2.w = nvt_rdata_scale_r * conjqm.w + deltaT * fquat.w;
invquatvec(orientation, conjqm2, mbody);
transpose_dot(ex_space, ey_space, ez_space, mbody, angmom2);
angmom2.x *= Scalar(0.5);
angmom2.y *= Scalar(0.5);
angmom2.z *= Scalar(0.5);
angmom2.w = Scalar(0.0);
// update angular velocity
Scalar4 angvel2;
computeAngularVelocity(angmom2, moment_inertia, ex_space, ey_space, ez_space, angvel2);
// write out results
rdata_vel[idx_body] = vel2;
rdata_angmom[idx_body] = angmom2;
rdata_angvel[idx_body] = angvel2;
rdata_conjqm[idx_body] = conjqm2;
}
/*! \param rigid_data Rigid body data to step forward 1/2 step
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Particle net forces
\param d_net_virial Particle net virial
\param box Box dimensions for periodic boundary condition handling
\param nvt_rdata Thermostat data
\param deltaT Amount of real time to step forward in one time step
*/
cudaError_t gpu_nvt_rigid_step_two( const gpu_rigid_data_arrays& rigid_data,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const BoxDim& box,
const gpu_nvt_rigid_data& nvt_rdata,
Scalar deltaT)
{
unsigned int n_bodies = rigid_data.n_bodies;
unsigned int n_group_bodies = rigid_data.n_group_bodies;
unsigned int block_size = 64;
unsigned int n_blocks = n_group_bodies / block_size + 1;
dim3 body_grid(n_blocks, 1, 1);
dim3 body_threads(block_size, 1, 1);
gpu_nvt_rigid_step_two_body_kernel<<< body_grid, body_threads >>>(rigid_data.vel,
rigid_data.angmom,
rigid_data.angvel,
rigid_data.orientation,
rigid_data.conjqm,
rigid_data.body_mass,
rigid_data.moment_inertia,
rigid_data.force,
rigid_data.torque,
rigid_data.body_indices,
n_group_bodies,
n_bodies,
nvt_rdata.scale_t,
nvt_rdata.scale_r,
box,
deltaT);
return cudaSuccess;
}
#pragma mark RIGID_KINETIC_ENERGY_REDUCTION
//! Shared memory for kinetic energy reduction
extern __shared__ Scalar nvt_rigid_sdata[];
/*! Summing the kinetic energy of rigid bodies
\param nvt_rdata Thermostat data for rigid bodies
*/
extern "C" __global__ void gpu_nvt_rigid_reduce_ksum_kernel(gpu_nvt_rigid_data nvt_rdata)
{
int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar* body_ke_t = nvt_rigid_sdata;
Scalar* body_ke_r = &nvt_rigid_sdata[blockDim.x];
Scalar Ksum_t = Scalar(0.0), Ksum_r=Scalar(0.0);
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < nvt_rdata.n_bodies; start += blockDim.x)
{
if (start + threadIdx.x < nvt_rdata.n_bodies)
{
body_ke_t[threadIdx.x] = nvt_rdata.partial_Ksum_t[start + threadIdx.x];
body_ke_r[threadIdx.x] = nvt_rdata.partial_Ksum_r[start + threadIdx.x];
}
else
{
body_ke_t[threadIdx.x] = Scalar(0.0);
body_ke_r[threadIdx.x] = Scalar(0.0);
}
__syncthreads();
// reduce the sum within a block
int offset = blockDim.x >> 1;
while (offset > 0)
{
if (threadIdx.x < offset)
{
body_ke_t[threadIdx.x] += body_ke_t[threadIdx.x + offset];
body_ke_r[threadIdx.x] += body_ke_r[threadIdx.x + offset];
}
offset >>= 1;
__syncthreads();
}
// everybody sums up Ksum
Ksum_t += body_ke_t[0];
Ksum_r += body_ke_r[0];
}
__syncthreads();
if (global_idx == 0)
{
*nvt_rdata.Ksum_t = Ksum_t;
*nvt_rdata.Ksum_r = Ksum_r;
}
}
/*!
\param nvt_rdata Thermostat data for rigid bodies
*/
cudaError_t gpu_nvt_rigid_reduce_ksum(const gpu_nvt_rigid_data& nvt_rdata)
{
// setup the grid to run the kernel
int block_size = 128;
dim3 grid( 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel: double the block size to accommodate Ksum_t and Ksum_r
gpu_nvt_rigid_reduce_ksum_kernel<<< grid, threads, 2 * block_size * sizeof(Scalar) >>>(nvt_rdata);
return cudaSuccess;
}
|
6c73aac1e225a0851d20321c70893ab408447af9.hip
|
// !!! This is a file automatically generated by hipify!!!
extern "C" {
#include <hip/hip_runtime.h>
#include <cstdio>
#include "timer.h"
#include "calcpi.h"
__global__ void pi_iter(const unsigned int count, const unsigned int max, const double *m, double *pieparts) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
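// midpoint-rule quadrature of 4/(1+x^2) on [0,1] (which integrates to pi): this thread sums `count` consecutive sub-intervals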
for (unsigned int i = count * index; i < count * (index + 1) && i < max; i++) {
double n_i = ((double) i + 0.5) * *m;
pieparts[index] += 4.0 / (1.0 + n_i * n_i);
}
pieparts[index] *= *m;
}
__global__ void add(double *values, const int offset, const int max) {
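// one step of a pairwise tree reduction: add the element `offset` positions ahead into this thread's slot;
// the host calls this repeatedly with doubling offset until the total ends up in values[0]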
unsigned int index = (blockIdx.x * blockDim.x + threadIdx.x) * offset * 2;
if (index + offset < max) {
values[index] = values[index] + values[index + offset];
values[index + offset] = 0;
}
}
struct calc_result_t do_calcpi(int blocks_per_grid, int iterations) {
calc_result_t endResult;
init_end_result(&endResult);
starttimer(&endResult.total_time);
#define GRIDS 1024
#define BLOCKS blocks_per_grid
starttimer(&endResult.alloc_time);
int batch_size = GRIDS * BLOCKS;
unsigned int perThread = (iterations + batch_size) / batch_size;
int *device_offset;
hipMalloc(&device_offset, sizeof(int));
double *device_pieparts;
hipMalloc(&device_pieparts, sizeof(double) * batch_size);
double *host_pieparts = (double *) malloc(sizeof(double) * batch_size);
double m = 1.0 / (double) iterations;
double *device_m;
hipMalloc(&device_m, sizeof(double));
hipMemcpy(device_m, &m, sizeof(double), hipMemcpyHostToDevice);
stoptimer(&endResult.alloc_time);
starttimer(&endResult.calc_time);
hipLaunchKernelGGL(( pi_iter), dim3(GRIDS), dim3(BLOCKS), 0, 0, perThread, iterations, device_m, device_pieparts);
hipMemcpy(host_pieparts, device_pieparts, sizeof(double) * batch_size, hipMemcpyDeviceToHost);
int offset = 1;
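// tree-reduce the per-thread partial sums on the device (log2(batch_size) launches); the total ends up in device_pieparts[0]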
do {
hipLaunchKernelGGL(( add), dim3(GRIDS), dim3(BLOCKS), 0, 0, device_pieparts, offset, batch_size);
offset *= 2;
} while (offset < batch_size);
double mypi;
hipMemcpy(&mypi, device_pieparts, sizeof(double), hipMemcpyDeviceToHost);
stoptimer(&endResult.calc_time);
starttimer(&endResult.dealloc_time);
hipFree(device_pieparts);
hipFree(device_m);
hipFree(device_offset);
free(host_pieparts);
stoptimer(&endResult.dealloc_time);
stoptimer(&endResult.total_time);
endResult.pi_value = mypi;
return endResult;
}
struct calc_result_t calc_pi(int blocks_per_grid, int iterations) {
return do_calcpi(blocks_per_grid, iterations);
}
}
|
6c73aac1e225a0851d20321c70893ab408447af9.cu
|
extern "C" {
#include <cuda.h>
#include <cstdio>
#include "timer.h"
#include "calcpi.h"
__global__ void pi_iter(const unsigned int count, const unsigned int max, const double *m, double *pieparts) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
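// midpoint-rule quadrature of 4/(1+x^2) on [0,1] (which integrates to pi): this thread sums `count` consecutive sub-intervals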
for (unsigned int i = count * index; i < count * (index + 1) && i < max; i++) {
double n_i = ((double) i + 0.5) * *m;
pieparts[index] += 4.0 / (1.0 + n_i * n_i);
}
pieparts[index] *= *m;
}
__global__ void add(double *values, const int offset, const int max) {
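// one step of a pairwise tree reduction: add the element `offset` positions ahead into this thread's slot;
// the host calls this repeatedly with doubling offset until the total ends up in values[0]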
unsigned int index = (blockIdx.x * blockDim.x + threadIdx.x) * offset * 2;
if (index + offset < max) {
values[index] = values[index] + values[index + offset];
values[index + offset] = 0;
}
}
struct calc_result_t do_calcpi(int blocks_per_grid, int iterations) {
calc_result_t endResult;
init_end_result(&endResult);
starttimer(&endResult.total_time);
#define GRIDS 1024
#define BLOCKS blocks_per_grid
starttimer(&endResult.alloc_time);
int batch_size = GRIDS * BLOCKS;
unsigned int perThread = (iterations + batch_size) / batch_size;
int *device_offset;
cudaMalloc(&device_offset, sizeof(int));
double *device_pieparts;
cudaMalloc(&device_pieparts, sizeof(double) * batch_size);
double *host_pieparts = (double *) malloc(sizeof(double) * batch_size);
double m = 1.0 / (double) iterations;
double *device_m;
cudaMalloc(&device_m, sizeof(double));
cudaMemcpy(device_m, &m, sizeof(double), cudaMemcpyHostToDevice);
stoptimer(&endResult.alloc_time);
starttimer(&endResult.calc_time);
pi_iter<<<GRIDS, BLOCKS>>>(perThread, iterations, device_m, device_pieparts);
cudaMemcpy(host_pieparts, device_pieparts, sizeof(double) * batch_size, cudaMemcpyDeviceToHost);
int offset = 1;
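// tree-reduce the per-thread partial sums on the device (log2(batch_size) launches); the total ends up in device_pieparts[0]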
do {
add<<<GRIDS, BLOCKS>>>(device_pieparts, offset, batch_size);
offset *= 2;
} while (offset < batch_size);
double mypi;
cudaMemcpy(&mypi, device_pieparts, sizeof(double), cudaMemcpyDeviceToHost);
stoptimer(&endResult.calc_time);
starttimer(&endResult.dealloc_time);
cudaFree(device_pieparts);
cudaFree(device_m);
cudaFree(device_offset);
free(host_pieparts);
stoptimer(&endResult.dealloc_time);
stoptimer(&endResult.total_time);
endResult.pi_value = mypi;
return endResult;
}
struct calc_result_t calc_pi(int blocks_per_grid, int iterations) {
return do_calcpi(blocks_per_grid, iterations);
}
}
|
9534964668885e21e48f1cd1410179ac604e031f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 3 . 0
! ---------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! CNRS, France
! and Princeton University, USA
! (there are currently many more authors!)
! (c) October 2017
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 3 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include "mesh_constants_cuda.h"
/* ----------------------------------------------------------------------------------------------- */
__global__ void compute_elastic_seismogram_kernel(int nrec_local,
realw* field,
int* d_ibool,
realw* hxir, realw* hetar, realw* hgammar,
realw* seismograms,
realw* nu,
int* ispec_selected_rec_loc,
int it){
int irec_local = blockIdx.x + blockIdx.y*gridDim.x;
int tx = threadIdx.x;
// local index
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
__shared__ realw sh_dxd[NGLL3_PADDED];
__shared__ realw sh_dyd[NGLL3_PADDED];
__shared__ realw sh_dzd[NGLL3_PADDED];
if (irec_local < nrec_local) {
int ispec = ispec_selected_rec_loc[irec_local] - 1;
sh_dxd[tx] = 0;
sh_dyd[tx] = 0;
sh_dzd[tx] = 0;
if (tx < NGLL3) {
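// interpolate the wavefield at the receiver: weight this GLL point's value by the Lagrange interpolants hxir*hetar*hgammar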
realw hlagrange = hxir[irec_local + nrec_local*I]*hetar[irec_local + nrec_local*J]*hgammar[irec_local + nrec_local*K];
int iglob = d_ibool[INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec)]-1;
sh_dxd[tx] = hlagrange * field[0 + 3*iglob];
sh_dyd[tx] = hlagrange * field[1 + 3*iglob];
sh_dzd[tx] = hlagrange * field[2 + 3*iglob];
//debug
//if (tx == 0) printf("thread %d %d %d - %f %f %f\n",ispec,iglob,irec_local,hlagrange,field[0 + 2*iglob],field[1 + 2*iglob]);
}
__syncthreads();
// reduction
for (unsigned int s=1; s<NGLL3_PADDED ; s *= 2) {
if (tx % (2*s) == 0){ sh_dxd[tx] += sh_dxd[tx + s];
sh_dyd[tx] += sh_dyd[tx + s];
sh_dzd[tx] += sh_dzd[tx + s];}
__syncthreads();
}
int idx = INDEX3(NDIM,nrec_local,0,irec_local,it);
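// threads 0, 1 and 2 each rotate the interpolated wavefield into the receiver frame (matrix nu) and write one seismogram component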
if (tx == 0) {
seismograms[0+idx] = nu[0+3*(0+3*irec_local)]*sh_dxd[0] + nu[0+3*(1+3*irec_local)]*sh_dyd[0] + nu[0+3*(2+3*irec_local)]*sh_dzd[0];
}
if (tx == 1) {
seismograms[1+idx] = nu[1+3*(0+3*irec_local)]*sh_dxd[0] + nu[1+3*(1+3*irec_local)]*sh_dyd[0] + nu[1+3*(2+3*irec_local)]*sh_dzd[0];
}
if (tx == 2) {
seismograms[2+idx] = nu[2+3*(0+3*irec_local)]*sh_dxd[0] + nu[2+3*(1+3*irec_local)]*sh_dyd[0] + nu[2+3*(2+3*irec_local)]*sh_dzd[0];
}
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void compute_acoustic_seismogram_kernel(int nrec_local,
field* pressure,
int* d_ibool,
realw* hxir, realw* hetar, realw* hgammar,
field* seismograms,
int* ispec_selected_rec_loc,
int it){
int irec_local = blockIdx.x + blockIdx.y*gridDim.x;
int tx = threadIdx.x;
// local index
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
__shared__ field sh_dxd[NGLL3_PADDED];
if (irec_local < nrec_local) {
int ispec = ispec_selected_rec_loc[irec_local]-1;
sh_dxd[tx] = Make_field(0.f);
if (tx < NGLL3) {
realw hlagrange = hxir[irec_local + nrec_local*I]*hetar[irec_local + nrec_local*J]*hgammar[irec_local + nrec_local*K];
int iglob = d_ibool[INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec)]-1;
sh_dxd[tx] = hlagrange*pressure[iglob];
}
__syncthreads();
for (unsigned int s=1; s<NGLL3_PADDED ; s *= 2) {
if (tx % (2*s) == 0) {sh_dxd[tx] += sh_dxd[tx + s];}
__syncthreads();
}
int idx = INDEX2(nrec_local,irec_local,it);
// Minus sign because pressure = -potential_dot_dot
if (tx == 0) seismograms[idx] = -sh_dxd[0];
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void compute_acoustic_vectorial_seismogram_kernel(int nrec_local,
int* d_ispec_is_acoustic,
field* scalar_potential,
realw* seismograms,
realw* d_rhostore,
int* d_ibool,
int * d_irregular_element_number,
realw* hxir, realw* hetar, realw* hgammar,
realw* d_xix, realw* d_xiy, realw* d_xiz,
realw* d_etax, realw* d_etay, realw* d_etaz,
realw* d_gammax, realw* d_gammay, realw* d_gammaz,
realw xix_regular,
realw* d_hprime_xx,
realw* nu,
int* ispec_selected_rec_loc,
int it){
int irec_local = blockIdx.x + blockIdx.y*gridDim.x;
int tx = threadIdx.x;
// shared memory
__shared__ realw s_dummy_loc[NGLL3_PADDED];
__shared__ realw s_temp1[NGLL3_PADDED];
__shared__ realw s_temp2[NGLL3_PADDED];
__shared__ realw s_temp3[NGLL3_PADDED];
__shared__ realw sh_hprime_xx[NGLL2];
// locals
realw temp1l, temp2l, temp3l;
realw rho_invl, hlagrange;
realw xixl, xiyl, xizl;
realw etaxl, etayl, etazl;
realw gammaxl, gammayl, gammazl;
realw dpotentialdxl, dpotentialdyl, dpotentialdzl;
int ispec, offset, offset_irreg, iglob, ispec_irreg;
/*
// debug
if (irec_local < nrec_local) {
ispec = ispec_selected_rec_loc[irec_local] - 1;
offset = INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec);
iglob = d_ibool[offset]-1;
rho_invl = 1.f / d_rhostore[offset];
xixl = d_xix[offset];
xiyl = d_xiy[offset];
xizl = d_xiz[offset];
etaxl = d_etax[offset];
etayl = d_etay[offset];
etazl = d_etaz[offset];
gammaxl = d_gammax[offset];
gammayl = d_gammay[offset];
gammazl = d_gammaz[offset];
hlagrange = hxir[irec_local + nrec_local*I]*hetar[irec_local + nrec_local*J]*hgammar[irec_local + nrec_local*K];
// loads into shared memory
if (tx < NGLL2) {
sh_hprime_xx[tx] = d_hprime_xx[tx];}
s_dummy_loc[tx] = 1.; //scalar_potential[iglob];
if (iglob > 0) {
printf(" iglob =%d, (i,j,k)=(%d,%d,%d), ispec =%d --- %f \n", iglob, I, J, K, ispec, scalar_potential[iglob]);}
else{
printf(" -illegal %d %d %d %d %d\n", tx, ispec, I, J, K);
}
}
*/
s_temp1[tx] = 0.0f;
s_temp2[tx] = 0.0f;
s_temp3[tx] = 0.0f;
// local index
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
if (irec_local >= nrec_local) return;
if (tx < NGLL3) {
ispec = ispec_selected_rec_loc[irec_local] - 1;
ispec_irreg = d_irregular_element_number[ispec] - 1;
// nothing to do if we are in an elastic element
if (d_ispec_is_acoustic[ispec] == 0) {return;}
offset = INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec);
offset_irreg = INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec_irreg);
iglob = d_ibool[offset]-1;
rho_invl = 1.f / d_rhostore[offset];
hlagrange = hxir[irec_local + nrec_local*I]*hetar[irec_local + nrec_local*J]*hgammar[irec_local + nrec_local*K];
}
//debug
//if (tx == 0) printf("thread %d %d %d - %f %f %f\n",ispec,iglob,irec_local,hlagrange,rho_invl, xixl);
// loads into shared memory
if (tx < NGLL2) sh_hprime_xx[tx] = d_hprime_xx[tx];
if (tx < NGLL3) s_dummy_loc[tx] = (realw)scalar_potential[iglob];
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready
__syncthreads();
if (tx < NGLL3) {
// computes first matrix product
temp1l = 0.f;
temp2l = 0.f;
temp3l = 0.f;
for (int l=0;l<NGLLX;l++) {
//assumes that hprime_xx = hprime_yy = hprime_zz
// 1. cut-plane along xi-direction
temp1l += s_dummy_loc[K*NGLL2+J*NGLLX+l] * sh_hprime_xx[l*NGLLX+I];
// 2. cut-plane along eta-direction
temp2l += s_dummy_loc[K*NGLL2+l*NGLLX+I] * sh_hprime_xx[l*NGLLX+J];
// 3. cut-plane along gamma-direction
temp3l += s_dummy_loc[l*NGLL2+J*NGLLX+I] * sh_hprime_xx[l*NGLLX+K];
}
if (ispec_irreg >= 0){ //irregular element
xixl = d_xix[offset_irreg];
xiyl = d_xiy[offset_irreg];
xizl = d_xiz[offset_irreg];
etaxl = d_etax[offset_irreg];
etayl = d_etay[offset_irreg];
etazl = d_etaz[offset_irreg];
gammaxl = d_gammax[offset_irreg];
gammayl = d_gammay[offset_irreg];
gammazl = d_gammaz[offset_irreg];
// compute derivatives of ux, uy and uz with respect to x, y and z
// derivatives of potential
dpotentialdxl = xixl*temp1l + etaxl*temp2l + gammaxl*temp3l;
dpotentialdyl = xiyl*temp1l + etayl*temp2l + gammayl*temp3l;
dpotentialdzl = xizl*temp1l + etazl*temp2l + gammazl*temp3l;
}
else{
// compute derivatives of ux, uy and uz with respect to x, y and z
// derivatives of potential
dpotentialdxl = xix_regular*temp1l;
dpotentialdyl = xix_regular*temp2l;
dpotentialdzl = xix_regular*temp3l;
}
// store the field in shared memory
s_temp1[tx] = hlagrange *dpotentialdxl * rho_invl;
s_temp2[tx] = hlagrange *dpotentialdyl * rho_invl;
s_temp3[tx] = hlagrange *dpotentialdzl * rho_invl;
}
__syncthreads();
// reduction
for (unsigned int s=1; s<NGLL3_PADDED ; s *= 2) {
if (tx % (2*s) == 0){ s_temp1[tx] += s_temp1[tx + s];
s_temp2[tx] += s_temp2[tx + s];
s_temp3[tx] += s_temp3[tx + s];}
__syncthreads();
}
int idx = INDEX3(NDIM,nrec_local,0,irec_local,it);
if (tx == 0) {
seismograms[0+idx] = nu[0+3*(0+3*irec_local)]*s_temp1[0] + nu[0+3*(1+3*irec_local)]*s_temp2[0] + nu[0+3*(2+3*irec_local)]*s_temp3[0];
}
if (tx == 1) {
seismograms[1+idx] = nu[1+3*(0+3*irec_local)]*s_temp1[0] + nu[1+3*(1+3*irec_local)]*s_temp2[0] + nu[1+3*(2+3*irec_local)]*s_temp3[0];
}
if (tx == 2) {
seismograms[2+idx] = nu[2+3*(0+3*irec_local)]*s_temp1[0] + nu[2+3*(1+3*irec_local)]*s_temp2[0] + nu[2+3*(2+3*irec_local)]*s_temp3[0];
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(compute_seismograms_cuda,
COMPUTE_SEISMOGRAMS_CUDA)(long* Mesh_pointer_f,
realw* seismograms_d,
realw* seismograms_v,
realw* seismograms_a,
realw* seismograms_p,
int* seismo_currentf,
int* NTSTEP_BETWEEN_OUTPUT_SEISMOSf,
int* it, int* it_end,
int* ACOUSTIC_SIMULATION,
int* ELASTIC_SIMULATION,
int* USE_TRICK_FOR_BETTER_PRESSURE) {
// compute_seismograms
TRACE("compute_seismograms_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer_f); // get Mesh from fortran integer wrapper
//checks if anything to do
if (mp->nrec_local == 0) return;
int num_blocks_x, num_blocks_y;
get_blocks_xy(mp->nrec_local,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(NGLL3_PADDED,1,1);
int seismo_current = *seismo_currentf - 1 ;
int NTSTEP_BETWEEN_OUTPUT_SEISMOS = *NTSTEP_BETWEEN_OUTPUT_SEISMOSf;
// warning: put in fortran routine prepare_GPU()
/*
if (it == 0){
if (mp->save_seismograms_d || mp->save_seismograms_v || mp->save_seismograms_a){
// warnings
if (! *ELASTIC_SIMULATION)
printf("\nWarning: Wrong type of seismogram for a pure fluid simulation, use pressure in seismotype\n");
if (*ELASTIC_SIMULATION && *ACOUSTIC_SIMULATION)
printf("\nWarning: Coupled elastic/fluid simulation has only valid displacement seismograms in elastic domain for GPU simulation\n\n");
}
if (mp->save_seismograms_p){
if (! *ACOUSTIC_SIMULATION)
printf("\nWarning: Wrong type of seismogram for a pure elastic simulation, use displ veloc or accel in seismotype\n");
if (*ELASTIC_SIMULATION && *ACOUSTIC_SIMULATION)
printf("\nWarning: Coupled elastic/fluid simulation has only valid pressure seismograms in fluid domain for GPU simulation\n\n");
}
}
*/
// todo: for coupled simulations, one should check in which domain the receiver lies to output displacement
// similar to what routine compute_vector_one_element(..) is doing
// computes current seismograms value
// elastic wavefield
// acoustic wavefield
if (*ELASTIC_SIMULATION){
if (mp->save_seismograms_d)
hipLaunchKernelGGL(( compute_elastic_seismogram_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->nrec_local,
mp->d_displ,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_d,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
if (mp->save_seismograms_v)
hipLaunchKernelGGL(( compute_elastic_seismogram_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->nrec_local,
mp->d_veloc,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_v,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
if (mp->save_seismograms_a)
hipLaunchKernelGGL(( compute_elastic_seismogram_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->nrec_local,
mp->d_accel,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_a,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
}
// acoustic wavefield
if (*ACOUSTIC_SIMULATION){
if (mp->save_seismograms_p){
if (*USE_TRICK_FOR_BETTER_PRESSURE){
hipLaunchKernelGGL(( compute_acoustic_seismogram_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->nrec_local,
mp->d_potential_acoustic,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_p,
mp->d_ispec_selected_rec_loc,
seismo_current);
}else{
hipLaunchKernelGGL(( compute_acoustic_seismogram_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->nrec_local,
mp->d_potential_dot_dot_acoustic,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_p,
mp->d_ispec_selected_rec_loc,
seismo_current);
}
}
// VM VM add computation of vectorial field in fluids ----------------------------------------------------------------
if (mp->save_seismograms_d)
hipLaunchKernelGGL(( compute_acoustic_vectorial_seismogram_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->nrec_local,
mp->d_ispec_is_acoustic,
mp->d_potential_acoustic,
mp->d_seismograms_d,
mp->d_rhostore,
mp->d_ibool,
mp->d_irregular_element_number,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_xix,mp->d_xiy,mp->d_xiz,
mp->d_etax,mp->d_etay,mp->d_etaz,
mp->d_gammax,mp->d_gammay,mp->d_gammaz,
mp->xix_regular,
mp->d_hprime_xx,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
if (mp->save_seismograms_v)
hipLaunchKernelGGL(( compute_acoustic_vectorial_seismogram_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->nrec_local,
mp->d_ispec_is_acoustic,
mp->d_potential_dot_acoustic,
mp->d_seismograms_v,
mp->d_rhostore,
mp->d_ibool,
mp->d_irregular_element_number,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_xix,mp->d_xiy,mp->d_xiz,
mp->d_etax,mp->d_etay,mp->d_etaz,
mp->d_gammax,mp->d_gammay,mp->d_gammaz,
mp->xix_regular,
mp->d_hprime_xx,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
if (mp->save_seismograms_a)
hipLaunchKernelGGL(( compute_acoustic_vectorial_seismogram_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->nrec_local,
mp->d_ispec_is_acoustic,
mp->d_potential_dot_dot_acoustic,
mp->d_seismograms_a,
mp->d_rhostore,
mp->d_ibool,
mp->d_irregular_element_number,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_xix,mp->d_xiy,mp->d_xiz,
mp->d_etax,mp->d_etay,mp->d_etaz,
mp->d_gammax,mp->d_gammay,mp->d_gammaz,
mp->xix_regular,
mp->d_hprime_xx,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
} // ACOUSTIC_SIMULATION
if (seismo_current == NTSTEP_BETWEEN_OUTPUT_SEISMOS || *it == *it_end ){
int size = mp->nrec_local * NTSTEP_BETWEEN_OUTPUT_SEISMOS * sizeof(realw);
// (hipMemcpy implicitly synchronizes all other cuda operations)
if (mp->save_seismograms_d)
print_CUDA_error_if_any(hipMemcpy(seismograms_d,mp->d_seismograms_d,NDIM * size,hipMemcpyDeviceToHost),72001);
if (mp->save_seismograms_v)
print_CUDA_error_if_any(hipMemcpy(seismograms_v,mp->d_seismograms_v,NDIM * size,hipMemcpyDeviceToHost),72002);
if (mp->save_seismograms_a)
print_CUDA_error_if_any(hipMemcpy(seismograms_a,mp->d_seismograms_a,NDIM * size,hipMemcpyDeviceToHost),72003);
// EB EB Temporary solution : in the future we will also declare host pressure seismograms as (1,nrec_local,NTSTEP_BETWEEN_OUTPUT_SEISMOS)
realw * seismo_temp;
if (mp->save_seismograms_p){
// EB EB We need to reorganize data to match host array shape :
// if NB_RUNS_ACOUSTIC_GPU = 1 from fortran shape (1,nrec_local,NTSTEP_BETWEEN_OUTPUT_SEISMOS) to (NDIM,nrec_local,NTSTEP_BETWEEN_OUTPUT_SEISMOS)
// if NB_RUNS_ACOUSTIC_GPU > 1 from fortran shape (NB_RUNS_ACOUSTIC_GPU,nrec_local,NTSTEP_BETWEEN_OUTPUT_SEISMOS) to (NDIM,nrec_local*NB_RUNS_ACOUSTIC_GPU,NTSTEP_BETWEEN_OUTPUT_SEISMOS)
seismo_temp = (realw*)malloc(size*NB_RUNS_ACOUSTIC_GPU);
print_CUDA_error_if_any(hipMemcpy(seismo_temp,mp->d_seismograms_p,size*NB_RUNS_ACOUSTIC_GPU,hipMemcpyDeviceToHost),72004);
for (int it = 0; it<NTSTEP_BETWEEN_OUTPUT_SEISMOS; it++)
for (int i_recloc=0; i_recloc<mp->nrec_local; i_recloc++)
for (int i_run=0; i_run<NB_RUNS_ACOUSTIC_GPU; i_run++){
seismograms_p[INDEX4(NDIM,mp->nrec_local,NB_RUNS_ACOUSTIC_GPU,0,i_recloc,i_run,it)] = seismo_temp[INDEX3(NB_RUNS_ACOUSTIC_GPU,mp->nrec_local,i_run,i_recloc,it)];
seismograms_p[INDEX4(NDIM,mp->nrec_local,NB_RUNS_ACOUSTIC_GPU,1,i_recloc,i_run,it)] = 0.f;
seismograms_p[INDEX4(NDIM,mp->nrec_local,NB_RUNS_ACOUSTIC_GPU,2,i_recloc,i_run,it)] = 0.f;
}
free(seismo_temp);
}
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("after compute_seismograms_cuda");
#endif
}
|
9534964668885e21e48f1cd1410179ac604e031f.cu
|
/*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 3 . 0
! ---------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! CNRS, France
! and Princeton University, USA
! (there are currently many more authors!)
! (c) October 2017
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 3 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include "mesh_constants_cuda.h"
/* ----------------------------------------------------------------------------------------------- */
__global__ void compute_elastic_seismogram_kernel(int nrec_local,
realw* field,
int* d_ibool,
realw* hxir, realw* hetar, realw* hgammar,
realw* seismograms,
realw* nu,
int* ispec_selected_rec_loc,
int it){
int irec_local = blockIdx.x + blockIdx.y*gridDim.x;
int tx = threadIdx.x;
// local index
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
__shared__ realw sh_dxd[NGLL3_PADDED];
__shared__ realw sh_dyd[NGLL3_PADDED];
__shared__ realw sh_dzd[NGLL3_PADDED];
if (irec_local < nrec_local) {
int ispec = ispec_selected_rec_loc[irec_local] - 1;
sh_dxd[tx] = 0;
sh_dyd[tx] = 0;
sh_dzd[tx] = 0;
if (tx < NGLL3) {
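// interpolate the wavefield at the receiver: weight this GLL point's value by the Lagrange interpolants hxir*hetar*hgammar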
realw hlagrange = hxir[irec_local + nrec_local*I]*hetar[irec_local + nrec_local*J]*hgammar[irec_local + nrec_local*K];
int iglob = d_ibool[INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec)]-1;
sh_dxd[tx] = hlagrange * field[0 + 3*iglob];
sh_dyd[tx] = hlagrange * field[1 + 3*iglob];
sh_dzd[tx] = hlagrange * field[2 + 3*iglob];
//debug
//if (tx == 0) printf("thread %d %d %d - %f %f %f\n",ispec,iglob,irec_local,hlagrange,field[0 + 2*iglob],field[1 + 2*iglob]);
}
__syncthreads();
// reduction
for (unsigned int s=1; s<NGLL3_PADDED ; s *= 2) {
if (tx % (2*s) == 0){ sh_dxd[tx] += sh_dxd[tx + s];
sh_dyd[tx] += sh_dyd[tx + s];
sh_dzd[tx] += sh_dzd[tx + s];}
__syncthreads();
}
int idx = INDEX3(NDIM,nrec_local,0,irec_local,it);
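// threads 0, 1 and 2 each rotate the interpolated wavefield into the receiver frame (matrix nu) and write one seismogram component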
if (tx == 0) {
seismograms[0+idx] = nu[0+3*(0+3*irec_local)]*sh_dxd[0] + nu[0+3*(1+3*irec_local)]*sh_dyd[0] + nu[0+3*(2+3*irec_local)]*sh_dzd[0];
}
if (tx == 1) {
seismograms[1+idx] = nu[1+3*(0+3*irec_local)]*sh_dxd[0] + nu[1+3*(1+3*irec_local)]*sh_dyd[0] + nu[1+3*(2+3*irec_local)]*sh_dzd[0];
}
if (tx == 2) {
seismograms[2+idx] = nu[2+3*(0+3*irec_local)]*sh_dxd[0] + nu[2+3*(1+3*irec_local)]*sh_dyd[0] + nu[2+3*(2+3*irec_local)]*sh_dzd[0];
}
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void compute_acoustic_seismogram_kernel(int nrec_local,
field* pressure,
int* d_ibool,
realw* hxir, realw* hetar, realw* hgammar,
field* seismograms,
int* ispec_selected_rec_loc,
int it){
int irec_local = blockIdx.x + blockIdx.y*gridDim.x;
int tx = threadIdx.x;
// local index
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
__shared__ field sh_dxd[NGLL3_PADDED];
if (irec_local < nrec_local) {
int ispec = ispec_selected_rec_loc[irec_local]-1;
sh_dxd[tx] = Make_field(0.f);
if (tx < NGLL3) {
realw hlagrange = hxir[irec_local + nrec_local*I]*hetar[irec_local + nrec_local*J]*hgammar[irec_local + nrec_local*K];
int iglob = d_ibool[INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec)]-1;
sh_dxd[tx] = hlagrange*pressure[iglob];
}
__syncthreads();
for (unsigned int s=1; s<NGLL3_PADDED ; s *= 2) {
if (tx % (2*s) == 0) {sh_dxd[tx] += sh_dxd[tx + s];}
__syncthreads();
}
int idx = INDEX2(nrec_local,irec_local,it);
// Minus sign because pressure = -potential_dot_dot
if (tx == 0) seismograms[idx] = -sh_dxd[0];
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void compute_acoustic_vectorial_seismogram_kernel(int nrec_local,
int* d_ispec_is_acoustic,
field* scalar_potential,
realw* seismograms,
realw* d_rhostore,
int* d_ibool,
int * d_irregular_element_number,
realw* hxir, realw* hetar, realw* hgammar,
realw* d_xix, realw* d_xiy, realw* d_xiz,
realw* d_etax, realw* d_etay, realw* d_etaz,
realw* d_gammax, realw* d_gammay, realw* d_gammaz,
realw xix_regular,
realw* d_hprime_xx,
realw* nu,
int* ispec_selected_rec_loc,
int it){
int irec_local = blockIdx.x + blockIdx.y*gridDim.x;
int tx = threadIdx.x;
// shared memory
__shared__ realw s_dummy_loc[NGLL3_PADDED];
__shared__ realw s_temp1[NGLL3_PADDED];
__shared__ realw s_temp2[NGLL3_PADDED];
__shared__ realw s_temp3[NGLL3_PADDED];
__shared__ realw sh_hprime_xx[NGLL2];
// locals
realw temp1l, temp2l, temp3l;
realw rho_invl, hlagrange;
realw xixl, xiyl, xizl;
realw etaxl, etayl, etazl;
realw gammaxl, gammayl, gammazl;
realw dpotentialdxl, dpotentialdyl, dpotentialdzl;
int ispec, offset, offset_irreg, iglob, ispec_irreg;
/*
// debug
if (irec_local < nrec_local) {
ispec = ispec_selected_rec_loc[irec_local] - 1;
offset = INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec);
iglob = d_ibool[offset]-1;
rho_invl = 1.f / d_rhostore[offset];
xixl = d_xix[offset];
xiyl = d_xiy[offset];
xizl = d_xiz[offset];
etaxl = d_etax[offset];
etayl = d_etay[offset];
etazl = d_etaz[offset];
gammaxl = d_gammax[offset];
gammayl = d_gammay[offset];
gammazl = d_gammaz[offset];
hlagrange = hxir[irec_local + nrec_local*I]*hetar[irec_local + nrec_local*J]*hgammar[irec_local + nrec_local*K];
// loads into shared memory
if (tx < NGLL2) {
sh_hprime_xx[tx] = d_hprime_xx[tx];}
s_dummy_loc[tx] = 1.; //scalar_potential[iglob];
if (iglob > 0) {
printf(" iglob =%d, (i,j,k)=(%d,%d,%d), ispec =%d --- %f \n", iglob, I, J, K, ispec, scalar_potential[iglob]);}
else{
printf(" -illegal %d %d %d %d %d\n", tx, ispec, I, J, K);
}
}
*/
s_temp1[tx] = 0.0f;
s_temp2[tx] = 0.0f;
s_temp3[tx] = 0.0f;
// local index
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
if (irec_local >= nrec_local) return;
if (tx < NGLL3) {
ispec = ispec_selected_rec_loc[irec_local] - 1;
ispec_irreg = d_irregular_element_number[ispec] - 1;
// nothing to do if we are in an elastic element
if (d_ispec_is_acoustic[ispec] == 0) {return;}
offset = INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec);
offset_irreg = INDEX4_PADDED(NGLLX,NGLLX,NGLLX,I,J,K,ispec_irreg);
iglob = d_ibool[offset]-1;
rho_invl = 1.f / d_rhostore[offset];
hlagrange = hxir[irec_local + nrec_local*I]*hetar[irec_local + nrec_local*J]*hgammar[irec_local + nrec_local*K];
}
//debug
//if (tx == 0) printf("thread %d %d %d - %f %f %f\n",ispec,iglob,irec_local,hlagrange,rho_invl, xixl);
// loads into shared memory
if (tx < NGLL2) sh_hprime_xx[tx] = d_hprime_xx[tx];
if (tx < NGLL3) s_dummy_loc[tx] = (realw)scalar_potential[iglob];
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready
__syncthreads();
if (tx < NGLL3) {
// computes first matrix product
temp1l = 0.f;
temp2l = 0.f;
temp3l = 0.f;
for (int l=0;l<NGLLX;l++) {
//assumes that hprime_xx = hprime_yy = hprime_zz
// 1. cut-plane along xi-direction
temp1l += s_dummy_loc[K*NGLL2+J*NGLLX+l] * sh_hprime_xx[l*NGLLX+I];
// 2. cut-plane along eta-direction
temp2l += s_dummy_loc[K*NGLL2+l*NGLLX+I] * sh_hprime_xx[l*NGLLX+J];
// 3. cut-plane along gamma-direction
temp3l += s_dummy_loc[l*NGLL2+J*NGLLX+I] * sh_hprime_xx[l*NGLLX+K];
}
if (ispec_irreg >= 0){ //irregular element
xixl = d_xix[offset_irreg];
xiyl = d_xiy[offset_irreg];
xizl = d_xiz[offset_irreg];
etaxl = d_etax[offset_irreg];
etayl = d_etay[offset_irreg];
etazl = d_etaz[offset_irreg];
gammaxl = d_gammax[offset_irreg];
gammayl = d_gammay[offset_irreg];
gammazl = d_gammaz[offset_irreg];
// compute derivatives of ux, uy and uz with respect to x, y and z
// derivatives of potential
dpotentialdxl = xixl*temp1l + etaxl*temp2l + gammaxl*temp3l;
dpotentialdyl = xiyl*temp1l + etayl*temp2l + gammayl*temp3l;
dpotentialdzl = xizl*temp1l + etazl*temp2l + gammazl*temp3l;
}
else{
// compute derivatives of ux, uy and uz with respect to x, y and z
// derivatives of potential
dpotentialdxl = xix_regular*temp1l;
dpotentialdyl = xix_regular*temp2l;
dpotentialdzl = xix_regular*temp3l;
}
// store the field in shared memory
s_temp1[tx] = hlagrange *dpotentialdxl * rho_invl;
s_temp2[tx] = hlagrange *dpotentialdyl * rho_invl;
s_temp3[tx] = hlagrange *dpotentialdzl * rho_invl;
}
__syncthreads();
// reduction
for (unsigned int s=1; s<NGLL3_PADDED ; s *= 2) {
if (tx % (2*s) == 0){ s_temp1[tx] += s_temp1[tx + s];
s_temp2[tx] += s_temp2[tx + s];
s_temp3[tx] += s_temp3[tx + s];}
__syncthreads();
}
int idx = INDEX3(NDIM,nrec_local,0,irec_local,it);
if (tx == 0) {
seismograms[0+idx] = nu[0+3*(0+3*irec_local)]*s_temp1[0] + nu[0+3*(1+3*irec_local)]*s_temp2[0] + nu[0+3*(2+3*irec_local)]*s_temp3[0];
}
if (tx == 1) {
seismograms[1+idx] = nu[1+3*(0+3*irec_local)]*s_temp1[0] + nu[1+3*(1+3*irec_local)]*s_temp2[0] + nu[1+3*(2+3*irec_local)]*s_temp3[0];
}
if (tx == 2) {
seismograms[2+idx] = nu[2+3*(0+3*irec_local)]*s_temp1[0] + nu[2+3*(1+3*irec_local)]*s_temp2[0] + nu[2+3*(2+3*irec_local)]*s_temp3[0];
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(compute_seismograms_cuda,
COMPUTE_SEISMOGRAMS_CUDA)(long* Mesh_pointer_f,
realw* seismograms_d,
realw* seismograms_v,
realw* seismograms_a,
realw* seismograms_p,
int* seismo_currentf,
int* NTSTEP_BETWEEN_OUTPUT_SEISMOSf,
int* it, int* it_end,
int* ACOUSTIC_SIMULATION,
int* ELASTIC_SIMULATION,
int* USE_TRICK_FOR_BETTER_PRESSURE) {
// compute_seismograms
TRACE("compute_seismograms_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer_f); // get Mesh from fortran integer wrapper
//checks if anything to do
if (mp->nrec_local == 0) return;
int num_blocks_x, num_blocks_y;
get_blocks_xy(mp->nrec_local,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(NGLL3_PADDED,1,1);
int seismo_current = *seismo_currentf - 1 ;
int NTSTEP_BETWEEN_OUTPUT_SEISMOS = *NTSTEP_BETWEEN_OUTPUT_SEISMOSf;
// warning: put in fortran routine prepare_GPU()
/*
if (it == 0){
if (mp->save_seismograms_d || mp->save_seismograms_v || mp->save_seismograms_a){
// warnings
if (! *ELASTIC_SIMULATION)
printf("\nWarning: Wrong type of seismogram for a pure fluid simulation, use pressure in seismotype\n");
if (*ELASTIC_SIMULATION && *ACOUSTIC_SIMULATION)
printf("\nWarning: Coupled elastic/fluid simulation has only valid displacement seismograms in elastic domain for GPU simulation\n\n");
}
if (mp->save_seismograms_p){
if (! *ACOUSTIC_SIMULATION)
printf("\nWarning: Wrong type of seismogram for a pure elastic simulation, use displ veloc or accel in seismotype\n");
if (*ELASTIC_SIMULATION && *ACOUSTIC_SIMULATION)
printf("\nWarning: Coupled elastic/fluid simulation has only valid pressure seismograms in fluid domain for GPU simulation\n\n");
}
}
*/
// todo: for coupled simulations, one should check in which domain the receiver lies to output displacement
// similar to what routine compute_vector_one_element(..) is doing
// computes current seismograms value
// elastic wavefield
// acoustic wavefield
if (*ELASTIC_SIMULATION){
if (mp->save_seismograms_d)
compute_elastic_seismogram_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->nrec_local,
mp->d_displ,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_d,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
if (mp->save_seismograms_v)
compute_elastic_seismogram_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->nrec_local,
mp->d_veloc,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_v,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
if (mp->save_seismograms_a)
compute_elastic_seismogram_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->nrec_local,
mp->d_accel,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_a,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
}
// acoustic wavefield
if (*ACOUSTIC_SIMULATION){
if (mp->save_seismograms_p){
if (*USE_TRICK_FOR_BETTER_PRESSURE){
compute_acoustic_seismogram_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->nrec_local,
mp->d_potential_acoustic,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_p,
mp->d_ispec_selected_rec_loc,
seismo_current);
}else{
compute_acoustic_seismogram_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->nrec_local,
mp->d_potential_dot_dot_acoustic,
mp->d_ibool,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_seismograms_p,
mp->d_ispec_selected_rec_loc,
seismo_current);
}
}
// VM VM add computation of vectorial field in fluids ----------------------------------------------------------------
if (mp->save_seismograms_d)
compute_acoustic_vectorial_seismogram_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->nrec_local,
mp->d_ispec_is_acoustic,
mp->d_potential_acoustic,
mp->d_seismograms_d,
mp->d_rhostore,
mp->d_ibool,
mp->d_irregular_element_number,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_xix,mp->d_xiy,mp->d_xiz,
mp->d_etax,mp->d_etay,mp->d_etaz,
mp->d_gammax,mp->d_gammay,mp->d_gammaz,
mp->xix_regular,
mp->d_hprime_xx,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
if (mp->save_seismograms_v)
compute_acoustic_vectorial_seismogram_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->nrec_local,
mp->d_ispec_is_acoustic,
mp->d_potential_dot_acoustic,
mp->d_seismograms_v,
mp->d_rhostore,
mp->d_ibool,
mp->d_irregular_element_number,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_xix,mp->d_xiy,mp->d_xiz,
mp->d_etax,mp->d_etay,mp->d_etaz,
mp->d_gammax,mp->d_gammay,mp->d_gammaz,
mp->xix_regular,
mp->d_hprime_xx,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
if (mp->save_seismograms_a)
compute_acoustic_vectorial_seismogram_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->nrec_local,
mp->d_ispec_is_acoustic,
mp->d_potential_dot_dot_acoustic,
mp->d_seismograms_a,
mp->d_rhostore,
mp->d_ibool,
mp->d_irregular_element_number,
mp->d_hxir,mp->d_hetar,mp->d_hgammar,
mp->d_xix,mp->d_xiy,mp->d_xiz,
mp->d_etax,mp->d_etay,mp->d_etaz,
mp->d_gammax,mp->d_gammay,mp->d_gammaz,
mp->xix_regular,
mp->d_hprime_xx,
mp->d_nu,
mp->d_ispec_selected_rec_loc,
seismo_current);
} // ACOUSTIC_SIMULATION
if (seismo_current == NTSTEP_BETWEEN_OUTPUT_SEISMOS || *it == *it_end ){
int size = mp->nrec_local * NTSTEP_BETWEEN_OUTPUT_SEISMOS * sizeof(realw);
// (cudaMemcpy implicitly synchronizes all other cuda operations)
if (mp->save_seismograms_d)
print_CUDA_error_if_any(cudaMemcpy(seismograms_d,mp->d_seismograms_d,NDIM * size,cudaMemcpyDeviceToHost),72001);
if (mp->save_seismograms_v)
print_CUDA_error_if_any(cudaMemcpy(seismograms_v,mp->d_seismograms_v,NDIM * size,cudaMemcpyDeviceToHost),72002);
if (mp->save_seismograms_a)
print_CUDA_error_if_any(cudaMemcpy(seismograms_a,mp->d_seismograms_a,NDIM * size,cudaMemcpyDeviceToHost),72003);
// EB EB Temporary solution : in the future we will also declare host pressure seismograms as (1,nrec_local,NTSTEP_BETWEEN_OUTPUT_SEISMOS)
realw * seismo_temp;
if (mp->save_seismograms_p){
// EB EB We need to reorganize data to match host array shape :
// if NB_RUNS_ACOUSTIC_GPU = 1 from fortran shape (1,nrec_local,NTSTEP_BETWEEN_OUTPUT_SEISMOS) to (NDIM,nrec_local,NTSTEP_BETWEEN_OUTPUT_SEISMOS)
// if NB_RUNS_ACOUSTIC_GPU > 1 from fortran shape (NB_RUNS_ACOUSTIC_GPU,nrec_local,NTSTEP_BETWEEN_OUTPUT_SEISMOS) to (NDIM,nrec_local*NB_RUNS_ACOUSTIC_GPU,NTSTEP_BETWEEN_OUTPUT_SEISMOS)
seismo_temp = (realw*)malloc(size*NB_RUNS_ACOUSTIC_GPU);
print_CUDA_error_if_any(cudaMemcpy(seismo_temp,mp->d_seismograms_p,size*NB_RUNS_ACOUSTIC_GPU,cudaMemcpyDeviceToHost),72004);
for (int it = 0; it<NTSTEP_BETWEEN_OUTPUT_SEISMOS; it++)
for (int i_recloc=0; i_recloc<mp->nrec_local; i_recloc++)
for (int i_run=0; i_run<NB_RUNS_ACOUSTIC_GPU; i_run++){
seismograms_p[INDEX4(NDIM,mp->nrec_local,NB_RUNS_ACOUSTIC_GPU,0,i_recloc,i_run,it)] = seismo_temp[INDEX3(NB_RUNS_ACOUSTIC_GPU,mp->nrec_local,i_run,i_recloc,it)];
seismograms_p[INDEX4(NDIM,mp->nrec_local,NB_RUNS_ACOUSTIC_GPU,1,i_recloc,i_run,it)] = 0.f;
seismograms_p[INDEX4(NDIM,mp->nrec_local,NB_RUNS_ACOUSTIC_GPU,2,i_recloc,i_run,it)] = 0.f;
}
free(seismo_temp);
}
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("after compute_seismograms_cuda");
#endif
}
|
f2f9660384b5879cb3e32ebbcfc7b8fe73ef575b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void multiply_by_beta_kernel(float * input, float * output, float beta)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int k = blockIdx.z * blockDim.z + threadIdx.z;
if (i >= c_Size.x || j >= c_Size.y || k >= c_Size.z)
return;
long int id = (k * c_Size.y + j) * c_Size.x + i;
output[id] = input[id] * beta;
}
|
f2f9660384b5879cb3e32ebbcfc7b8fe73ef575b.cu
|
#include "includes.h"
__global__ void multiply_by_beta_kernel(float * input, float * output, float beta)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int k = blockIdx.z * blockDim.z + threadIdx.z;
if (i >= c_Size.x || j >= c_Size.y || k >= c_Size.z)
return;
long int id = (k * c_Size.y + j) * c_Size.x + i;
output[id] = input[id] * beta;
}
|
466d9e44b5511cdb1151daa78ab3e489f8027f1d.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* \file dnn/src/cuda/convolution/chanwise/bwd_small.cu
*
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* This file has been modified by Megvii ("Megvii Modifications").
* All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
*
* --------------------------------------------------------------------------
*/
#include "./kern.cuh"
#include "kern_helper_hip.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#include "src/cuda/convolution/chanwise/launch_config.cuh"
#include "src/cuda/fp16_help.cuh"
using namespace megdnn;
using namespace cuda;
using namespace convolution;
using namespace chanwise;
namespace {
enum DepthwiseConv2dDirection { DIRECTION_FORWARD, DIRECTION_BACKWARD };
// CUDA kernel to compute the depthwise convolution forward pass in NCHW format,
// tailored for small images up to 32x32. Stride and depth multiplier must be 1.
// Padding must be 'SAME', which allows to reuse the index computation. Only
// use this kernel if CanLaunchDepthwiseConv2dGPUSmall(args) returns true.
// Tiles of the input and filter tensors are loaded into shared memory before
// performing the convolution. Each thread handles two elements per iteration,
// one each in the lower and upper half of a tile.
// Backprop input direction is the same as forward direction with the filter
// rotated by 180.
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
__global__ void
#if __CUDA_ARCH__ >= 750
__launch_bounds__(1024, 1)
#else
__launch_bounds__(1024, 2)
#endif
DepthwiseConv2dGPUKernelNCHWSmall(
const Param param, const T* input, const T* filter, T* output) {
// Holds block plus halo and filter data for blockDim.z depths.
extern __shared__ __align__(8) unsigned char shared_memory[];
static_assert(sizeof(T) <= 8, "Insufficient alignment detected");
T* const shared_data = reinterpret_cast<T*>(shared_memory);
const int num_batches = static_cast<int>(param.batch);
const int in_height = static_cast<int>(param.src_h);
const int in_width = static_cast<int>(param.src_w);
const int in_depth = static_cast<int>(param.src_chl);
const int filter_height =
kKnownFilterHeight < 0 ? static_cast<int>(param.flt_h) : kKnownFilterHeight;
const int filter_width =
kKnownFilterWidth < 0 ? static_cast<int>(param.flt_w) : kKnownFilterWidth;
const int pad_height = static_cast<int>(param.pad_h);
const int pad_width = static_cast<int>(param.pad_w);
// Fixed blockDim.z, tailored for maximum grid size for images of size
// 16x16. assert(blockDim.x == param.src_w); assert(blockDim.z ==
// kBlockDepth);
const int block_height = blockDim.y;
// These values are the same for all threads and could
// be precomputed on the CPU.
const int block_pixels = in_width * block_height;
const int block_size = block_pixels * kBlockDepth;
const int in_pixels = in_width * in_height;
const int in_increment = in_width - 1;
const int filter_pixels = filter_height * filter_width;
const int tile_width = in_width + filter_width - 1;
const int even_height = kKnownEvenHeight || (1 & ~in_height);
const int tile_height = in_height + filter_height - even_height;
const int tile_pixels = tile_width * tile_height;
const int tile_size = tile_pixels * kBlockDepth;
const int tile_offset = block_height * tile_width;
const int pad_offset = pad_height * tile_width + pad_width;
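    // Worked example (illustrative numbers, not taken from the original source):
    // for a 16x16 input, a 3x3 filter and kBlockDepth = 8 this gives
    //   tile_width = 18, tile_height = 18, tile_pixels = 324, tile_size = 2592,
    // plus filter_pixels * kBlockDepth = 72 filter elements stored behind the tile,
    // i.e. kBlockDepth * (tile_pixels + filter_pixels) shared elements in total,
    // matching shared_memory_size computed in LaunchDepthwiseConv2dGPUSmall below.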
const int in_total_depth = in_depth * num_batches;
const int in_blocks = (in_total_depth + kBlockDepth - 1) / kBlockDepth;
const int thread_col = threadIdx.x;
const int thread_row = threadIdx.y;
const int thread_depth = threadIdx.z;
// Position in block.
const int thread_pix = thread_row * in_width + thread_col;
const int thread_idx = thread_depth * block_pixels + thread_pix;
// Initialize tile, in particular the padding.
for (int i = thread_idx; i < tile_size; i += block_size) {
shared_data[i] = T();
}
__syncthreads();
// Position in tensors.
const int tensor_idx = thread_depth * in_pixels + thread_pix;
// Position in (padded) shared memory.
const int data_pix = thread_row * tile_width + thread_col;
const int data_idx = thread_depth * tile_pixels + data_pix;
// Position in shared memory, offset by pad_height / pad_width.
const int tile_idx = data_idx + pad_offset;
// Filter is always in HWCK format, irrespective of the input/output format.
const int filter_pix = thread_idx / kBlockDepth;
const int filter_channel = thread_idx % kBlockDepth;
const int max_channel = in_total_depth - thread_depth;
const int filter_write_offset =
filter_pix < filter_pixels ? tile_size + thread_idx : 0;
const int filter_read_offset =
tile_size + thread_depth +
(kDirection == DIRECTION_FORWARD ? 0 : filter_pixels * kBlockDepth);
const bool skip_second =
!kKnownEvenHeight && thread_row + (in_height & 1) == block_height;
for (int b = blockIdx.x; b < in_blocks; b += gridDim.x) {
const int channel = b * kBlockDepth;
const int inout_offset = channel * in_pixels + tensor_idx;
const bool channel_in_range = channel < max_channel;
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
T* const tile_ptr = tile_idx + shared_data;
tile_ptr[0] = *in_ptr;
if (!skip_second) {
tile_ptr[tile_offset] = *(block_pixels + in_ptr);
}
}
if (filter_write_offset != 0) {
const int filter_offset =
(channel + filter_channel) % in_depth * filter_pixels + filter_pix;
shared_data[filter_write_offset] = *(filter_offset + filter);
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
if (channel_in_range) {
T2 sum = {0.0, 0.0};
int shared_offset = data_idx;
const T* filter_ptr = filter_read_offset + shared_data;
#pragma unroll
for (int r = 0; r < filter_height; ++r) {
#pragma unroll
for (int c = 0; c < filter_width; ++c) {
if (kDirection == DIRECTION_BACKWARD) {
filter_ptr -= kBlockDepth;
}
const T2 filter_value = {*filter_ptr, *filter_ptr};
const T* const tile_ptr = shared_offset + shared_data;
const T2 tile_value = {tile_ptr[0], tile_ptr[tile_offset]};
sum = fma2(filter_value, tile_value, sum);
++shared_offset;
if (kDirection == DIRECTION_FORWARD) {
filter_ptr += kBlockDepth;
}
}
shared_offset += in_increment;
}
T* const out_ptr = inout_offset + output;
out_ptr[0] = static_cast<T>(sum.x);
if (!skip_second) {
out_ptr[block_pixels] = static_cast<T>(sum.y);
}
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
}
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
hipStream_t stream) {
const int block_height = (param.src_h + 1) / 2;
dim3 block_dim;
int block_count;
void (*kernel)(const Param, const T*, const T*, T*);
block_dim = dim3(param.src_w, block_height, kBlockDepth);
block_count = DIVUP(param.batch * param.src_chl * param.chl_mul, kBlockDepth) *
kBlockDepth;
kernel = DepthwiseConv2dGPUKernelNCHWSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
kKnownEvenHeight>;
const int tile_width = param.src_w + param.flt_w - 1;
const int tile_height = block_height * 2 + param.flt_h - 1;
const int tile_pixels = tile_height * tile_width;
const int filter_pixels = param.flt_h * param.flt_w;
const int shared_memory_size =
kBlockDepth * (tile_pixels + filter_pixels) * sizeof(T);
const int num_outputs = param.out_h * param.out_w * block_count;
block_count = GetFixedBlockSize(
num_outputs, kernel, shared_memory_size,
block_dim.x * block_dim.y * block_dim.z);
hipLaunchKernelGGL(( kernel), dim3(block_count), dim3(block_dim), shared_memory_size, stream,
param, input, filter, output);
after_kernel_launch();
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
hipStream_t stream) {
if (param.src_h & 1) {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
false>(param, input, filter, output, stream);
} else {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
true>(param, input, filter, output, stream);
}
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
hipStream_t stream) {
// Maximize (power of two) kBlockDepth while keeping a block within 1024
// threads (2 pixels per thread).
const int block_pixels = (param.src_h + 1) / 2 * param.src_w;
if (block_pixels > 256) {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 2>(
param, input, filter, output, stream);
} else if (block_pixels > 128) {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 4>(
param, input, filter, output, stream);
} else {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 8>(
param, input, filter, output, stream);
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace convolution {
namespace chanwise {
// ===================================bwd data==================================
#define LAUNCH(type, type2) \
if (param.flt_h == 3 && param.flt_w == 3) { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_BACKWARD, 3, 3>( \
param, dst_grad, flt, src_grad, stream); \
} else { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_BACKWARD, -1, -1>( \
param, dst_grad, flt, src_grad, stream); \
}
template <>
void run_bwd_data_small(
float* src_grad, const float* dst_grad, const float* flt, const Param& param,
hipStream_t stream) {
LAUNCH(float, float2);
}
#if TORCH_HIP_VERSION >= 9000
template <>
void run_bwd_data_small(
__half* src_grad, const __half* dst_grad, const __half* flt, const Param& param,
hipStream_t stream) {
LAUNCH(__half, __half2);
}
#endif
#undef LAUNCH
} // namespace chanwise
} // namespace convolution
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
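The small-image dispatcher in the file above picks the largest kBlockDepth in {2, 4, 8} that keeps one block within 1024 threads at two pixels per thread. A standalone restatement of that selection rule, using only the thresholds already present in LaunchDepthwiseConv2dGPUSmall (names here are illustrative):

// Sketch: mirrors the block-depth choice made by the dispatcher above.
inline int pick_block_depth(int src_h, int src_w) {
    const int block_pixels = (src_h + 1) / 2 * src_w;  // each thread covers two rows
    if (block_pixels > 256) return 2;   // up to 512 pixels -> 2 * 512 = 1024 threads
    if (block_pixels > 128) return 4;   // up to 256 pixels -> 4 * 256 = 1024 threads
    return 8;                           // up to 128 pixels -> 8 * 128 = 1024 threads
}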
466d9e44b5511cdb1151daa78ab3e489f8027f1d.cu
|
/**
* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* --------------------------------------------------------------------------
* \file dnn/src/cuda/convolution/chanwise/bwd_small.cu
*
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* This file has been modified by Megvii ("Megvii Modifications").
* All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
*
* --------------------------------------------------------------------------
*/
#include "./kern.cuh"
#include "./kern_helper.cuh"
#include "cuda.h"
#include "cuda_fp16.h"
#include "src/cuda/convolution/chanwise/launch_config.cuh"
#include "src/cuda/fp16_help.cuh"
using namespace megdnn;
using namespace cuda;
using namespace convolution;
using namespace chanwise;
namespace {
enum DepthwiseConv2dDirection { DIRECTION_FORWARD, DIRECTION_BACKWARD };
// CUDA kernel to compute the depthwise convolution forward pass in NCHW format,
// tailored for small images up to 32x32. Stride and depth multiplier must be 1.
// Padding must be 'SAME', which allows to reuse the index computation. Only
// use this kernel if CanLaunchDepthwiseConv2dGPUSmall(args) returns true.
// Tiles of the input and filter tensors are loaded into shared memory before
// performing the convolution. Each thread handles two elements per iteration,
// one each in the lower and upper half of a tile.
// Backprop input direction is the same as forward direction with the filter
// rotated by 180°.
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
__global__ void
#if __CUDA_ARCH__ >= 750
__launch_bounds__(1024, 1)
#else
__launch_bounds__(1024, 2)
#endif
DepthwiseConv2dGPUKernelNCHWSmall(
const Param param, const T* input, const T* filter, T* output) {
// Holds block plus halo and filter data for blockDim.z depths.
extern __shared__ __align__(8) unsigned char shared_memory[];
static_assert(sizeof(T) <= 8, "Insufficient alignment detected");
T* const shared_data = reinterpret_cast<T*>(shared_memory);
const int num_batches = static_cast<int>(param.batch);
const int in_height = static_cast<int>(param.src_h);
const int in_width = static_cast<int>(param.src_w);
const int in_depth = static_cast<int>(param.src_chl);
const int filter_height =
kKnownFilterHeight < 0 ? static_cast<int>(param.flt_h) : kKnownFilterHeight;
const int filter_width =
kKnownFilterWidth < 0 ? static_cast<int>(param.flt_w) : kKnownFilterWidth;
const int pad_height = static_cast<int>(param.pad_h);
const int pad_width = static_cast<int>(param.pad_w);
// Fixed blockDim.z, tailored for maximum grid size for images of size
// 16x16. assert(blockDim.x == param.src_w); assert(blockDim.z ==
// kBlockDepth);
const int block_height = blockDim.y;
// These values are the same for all threads and could
// be precomputed on the CPU.
const int block_pixels = in_width * block_height;
const int block_size = block_pixels * kBlockDepth;
const int in_pixels = in_width * in_height;
const int in_increment = in_width - 1;
const int filter_pixels = filter_height * filter_width;
const int tile_width = in_width + filter_width - 1;
const int even_height = kKnownEvenHeight || (1 & ~in_height);
const int tile_height = in_height + filter_height - even_height;
const int tile_pixels = tile_width * tile_height;
const int tile_size = tile_pixels * kBlockDepth;
const int tile_offset = block_height * tile_width;
const int pad_offset = pad_height * tile_width + pad_width;
const int in_total_depth = in_depth * num_batches;
const int in_blocks = (in_total_depth + kBlockDepth - 1) / kBlockDepth;
const int thread_col = threadIdx.x;
const int thread_row = threadIdx.y;
const int thread_depth = threadIdx.z;
// Position in block.
const int thread_pix = thread_row * in_width + thread_col;
const int thread_idx = thread_depth * block_pixels + thread_pix;
// Initialize tile, in particular the padding.
for (int i = thread_idx; i < tile_size; i += block_size) {
shared_data[i] = T();
}
__syncthreads();
// Position in tensors.
const int tensor_idx = thread_depth * in_pixels + thread_pix;
// Position in (padded) shared memory.
const int data_pix = thread_row * tile_width + thread_col;
const int data_idx = thread_depth * tile_pixels + data_pix;
// Position in shared memory, offset by pad_height / pad_width.
const int tile_idx = data_idx + pad_offset;
// Filter is always in HWCK format, irrespective of the input/output format.
const int filter_pix = thread_idx / kBlockDepth;
const int filter_channel = thread_idx % kBlockDepth;
const int max_channel = in_total_depth - thread_depth;
const int filter_write_offset =
filter_pix < filter_pixels ? tile_size + thread_idx : 0;
const int filter_read_offset =
tile_size + thread_depth +
(kDirection == DIRECTION_FORWARD ? 0 : filter_pixels * kBlockDepth);
const bool skip_second =
!kKnownEvenHeight && thread_row + (in_height & 1) == block_height;
for (int b = blockIdx.x; b < in_blocks; b += gridDim.x) {
const int channel = b * kBlockDepth;
const int inout_offset = channel * in_pixels + tensor_idx;
const bool channel_in_range = channel < max_channel;
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
T* const tile_ptr = tile_idx + shared_data;
tile_ptr[0] = *in_ptr;
if (!skip_second) {
tile_ptr[tile_offset] = *(block_pixels + in_ptr);
}
}
if (filter_write_offset != 0) {
const int filter_offset =
(channel + filter_channel) % in_depth * filter_pixels + filter_pix;
shared_data[filter_write_offset] = *(filter_offset + filter);
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
if (channel_in_range) {
T2 sum = {0.0, 0.0};
int shared_offset = data_idx;
const T* filter_ptr = filter_read_offset + shared_data;
#pragma unroll
for (int r = 0; r < filter_height; ++r) {
#pragma unroll
for (int c = 0; c < filter_width; ++c) {
if (kDirection == DIRECTION_BACKWARD) {
filter_ptr -= kBlockDepth;
}
const T2 filter_value = {*filter_ptr, *filter_ptr};
const T* const tile_ptr = shared_offset + shared_data;
const T2 tile_value = {tile_ptr[0], tile_ptr[tile_offset]};
sum = fma2(filter_value, tile_value, sum);
++shared_offset;
if (kDirection == DIRECTION_FORWARD) {
filter_ptr += kBlockDepth;
}
}
shared_offset += in_increment;
}
T* const out_ptr = inout_offset + output;
out_ptr[0] = static_cast<T>(sum.x);
if (!skip_second) {
out_ptr[block_pixels] = static_cast<T>(sum.y);
}
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
}
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
cudaStream_t stream) {
const int block_height = (param.src_h + 1) / 2;
dim3 block_dim;
int block_count;
void (*kernel)(const Param, const T*, const T*, T*);
block_dim = dim3(param.src_w, block_height, kBlockDepth);
block_count = DIVUP(param.batch * param.src_chl * param.chl_mul, kBlockDepth) *
kBlockDepth;
kernel = DepthwiseConv2dGPUKernelNCHWSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
kKnownEvenHeight>;
const int tile_width = param.src_w + param.flt_w - 1;
const int tile_height = block_height * 2 + param.flt_h - 1;
const int tile_pixels = tile_height * tile_width;
const int filter_pixels = param.flt_h * param.flt_w;
const int shared_memory_size =
kBlockDepth * (tile_pixels + filter_pixels) * sizeof(T);
const int num_outputs = param.out_h * param.out_w * block_count;
block_count = GetFixedBlockSize(
num_outputs, kernel, shared_memory_size,
block_dim.x * block_dim.y * block_dim.z);
kernel<<<block_count, block_dim, shared_memory_size, stream>>>(
param, input, filter, output);
after_kernel_launch();
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
cudaStream_t stream) {
if (param.src_h & 1) {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
false>(param, input, filter, output, stream);
} else {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
true>(param, input, filter, output, stream);
}
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
cudaStream_t stream) {
// Maximize (power of two) kBlockDepth while keeping a block within 1024
// threads (2 pixels per thread).
const int block_pixels = (param.src_h + 1) / 2 * param.src_w;
if (block_pixels > 256) {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 2>(
param, input, filter, output, stream);
} else if (block_pixels > 128) {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 4>(
param, input, filter, output, stream);
} else {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 8>(
param, input, filter, output, stream);
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace convolution {
namespace chanwise {
// ===================================bwd data==================================
#define LAUNCH(type, type2) \
if (param.flt_h == 3 && param.flt_w == 3) { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_BACKWARD, 3, 3>( \
param, dst_grad, flt, src_grad, stream); \
} else { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_BACKWARD, -1, -1>( \
param, dst_grad, flt, src_grad, stream); \
}
template <>
void run_bwd_data_small(
float* src_grad, const float* dst_grad, const float* flt, const Param& param,
cudaStream_t stream) {
LAUNCH(float, float2);
}
#if CUDA_VERSION >= 9000
template <>
void run_bwd_data_small(
__half* src_grad, const __half* dst_grad, const __half* flt, const Param& param,
cudaStream_t stream) {
LAUNCH(__half, __half2);
}
#endif
#undef LAUNCH
} // namespace chanwise
} // namespace convolution
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
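The kernel comments in both copies above list the preconditions (images up to 32x32, stride 1, depth multiplier 1, 'SAME' padding) and defer the actual check to CanLaunchDepthwiseConv2dGPUSmall, which is not part of this file. A rough sketch of what such a predicate could look like for the Param fields used here; the stride values are passed in because their field names are not visible in this file, and this is an assumption, not the real MegEngine/TensorFlow implementation:

// Sketch only: approximates the documented preconditions of the small kernel.
inline bool can_launch_small(const Param& param, int stride_h, int stride_w) {
    const int block_pixels = (param.src_h + 1) / 2 * param.src_w;  // two pixels per thread
    return stride_h == 1 && stride_w == 1 &&           // unit stride
           param.chl_mul == 1 &&                       // depth multiplier 1
           param.src_h <= 32 && param.src_w <= 32 &&   // small images only
           block_pixels * 2 <= 1024;                   // one block fits in 1024 threads
}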
19a8cc6e70cb163c4813d942d634d91ab5bcbf7c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <sstream>
#include <thrust/remove.h>
#include <thrust/host_vector.h>
#include "draw/host_window.h"
#include "dg/algorithm.h"
#include "shu.cuh"
using namespace std;
using namespace dg;
const double lx = 2.*M_PI;
const double ly = 2.*M_PI;
// const unsigned k = 2;
const double D = 0.01;
const double T = 1.;
double initial( double x, double y){return 2.*sin(x)*sin(y);}
double solution( double x, double y) {return 2.*sin(x)*sin(y)*exp( -2.*T*D);}
int main()
{
unsigned n, Nx, Ny;
double eps;
cout << "Type n, Nx, Ny and eps!\n";
cin >> n >> Nx >> Ny>>eps;
const unsigned NT = (unsigned)(T*n*Nx/0.1/lx);
Grid2d grid( 0, lx, 0, ly, n, Nx, Ny, dg::PER, dg::PER);
DVec w2d( create::weights( grid));
const double dt = T/(double)NT;
/////////////////////////////////////////////////////////////////
//create CUDA context that uses OpenGL textures in Glfw window
std::stringstream title;
GLFWwindow* w = draw::glfwInitAndCreateWindow(600, 600, "Navier Stokes");
draw::RenderHostData render( 1,1);
////////////////////////////////////////////////////////////
cout << "# of Legendre coefficients: " << n<<endl;
cout << "# of grid cells: " << Nx*Ny<<endl;
cout << "Timestep " << dt << endl;
//cout << "# of timesteps " << NT << endl;
cout << "Diffusion " << D <<endl;
dg::Lamb lamb( 0.5*lx, 0.5*ly, 0.2*lx, 1);
HVec omega = evaluate ( lamb, grid);
DVec stencil = evaluate( one, grid);
DVec y0( omega);
Shu<dg::DMatrix, dg::DVec> test( grid, eps);
Diffusion<DMatrix, DVec> diffusion( grid, D);
Karniadakis< DVec > ab( y0, y0.size(), 1e-8);
////////////////////////////////glfw//////////////////////////////
//create visualisation vectors
DVec visual( grid.size());
HVec hvisual( grid.size());
//transform vector to an equidistant grid
dg::IDMatrix equidistant = dg::create::backscatter( grid );
draw::ColorMapRedBlueExt colors( 1.);
ab.init( test, diffusion, y0, dt);
while (!glfwWindowShouldClose(w))
{
//transform field to an equidistant grid
cout << "Total vorticity is: "<<blas2::dot( stencil, w2d, y0) << "\n";
cout << "Total enstrophy is: "<<blas2::dot( w2d, y0)<<"\n";
//compute the color scale
dg::blas2::symv( equidistant, y0, visual );
colors.scale() = (float)thrust::reduce( visual.begin(), visual.end(), -1., dg::AbsMax<float>() );
std::cout << "Color scale " << colors.scale() <<"\n";
//draw and swap buffers
dg::blas1::transfer(visual, hvisual);
render.renderQuad( hvisual, n*Nx, n*Ny, colors);
//step
ab( test,diffusion, y0 );
glfwSwapBuffers(w);
glfwWaitEvents();
}
glfwTerminate();
////////////////////////////////////////////////////////////////////
/*
cout << "Total vorticity is: "<< blas2::dot( stencil, w2d, y0) << "\n";
cout << "Total enstrophy is "<<blas2::dot( y0, w2d, y0)<<"\n";
blas1::axpby( 1., sol.data(), -1., y0);
hipDeviceSynchronize();
cout << "Distance to solution "<<sqrt( blas2::dot( w2d, y0))<<endl; //don't forget sqrt when comuting errors
*/
return 0;
}
|
19a8cc6e70cb163c4813d942d634d91ab5bcbf7c.cu
|
#include <iostream>
#include <iomanip>
#include <sstream>
#include <thrust/remove.h>
#include <thrust/host_vector.h>
#include "draw/host_window.h"
#include "dg/algorithm.h"
#include "shu.cuh"
using namespace std;
using namespace dg;
const double lx = 2.*M_PI;
const double ly = 2.*M_PI;
// const unsigned k = 2;
const double D = 0.01;
const double T = 1.;
double initial( double x, double y){return 2.*sin(x)*sin(y);}
double solution( double x, double y) {return 2.*sin(x)*sin(y)*exp( -2.*T*D);}
int main()
{
unsigned n, Nx, Ny;
double eps;
cout << "Type n, Nx, Ny and eps!\n";
cin >> n >> Nx >> Ny>>eps;
const unsigned NT = (unsigned)(T*n*Nx/0.1/lx);
Grid2d grid( 0, lx, 0, ly, n, Nx, Ny, dg::PER, dg::PER);
DVec w2d( create::weights( grid));
const double dt = T/(double)NT;
/////////////////////////////////////////////////////////////////
//create CUDA context that uses OpenGL textures in Glfw window
std::stringstream title;
GLFWwindow* w = draw::glfwInitAndCreateWindow(600, 600, "Navier Stokes");
draw::RenderHostData render( 1,1);
////////////////////////////////////////////////////////////
cout << "# of Legendre coefficients: " << n<<endl;
cout << "# of grid cells: " << Nx*Ny<<endl;
cout << "Timestep " << dt << endl;
//cout << "# of timesteps " << NT << endl;
cout << "Diffusion " << D <<endl;
dg::Lamb lamb( 0.5*lx, 0.5*ly, 0.2*lx, 1);
HVec omega = evaluate ( lamb, grid);
DVec stencil = evaluate( one, grid);
DVec y0( omega);
Shu<dg::DMatrix, dg::DVec> test( grid, eps);
Diffusion<DMatrix, DVec> diffusion( grid, D);
Karniadakis< DVec > ab( y0, y0.size(), 1e-8);
////////////////////////////////glfw//////////////////////////////
//create visualisation vectors
DVec visual( grid.size());
HVec hvisual( grid.size());
//transform vector to an equidistant grid
dg::IDMatrix equidistant = dg::create::backscatter( grid );
draw::ColorMapRedBlueExt colors( 1.);
ab.init( test, diffusion, y0, dt);
while (!glfwWindowShouldClose(w))
{
//transform field to an equidistant grid
cout << "Total vorticity is: "<<blas2::dot( stencil, w2d, y0) << "\n";
cout << "Total enstrophy is: "<<blas2::dot( w2d, y0)<<"\n";
//compute the color scale
dg::blas2::symv( equidistant, y0, visual );
colors.scale() = (float)thrust::reduce( visual.begin(), visual.end(), -1., dg::AbsMax<float>() );
std::cout << "Color scale " << colors.scale() <<"\n";
//draw and swap buffers
dg::blas1::transfer(visual, hvisual);
render.renderQuad( hvisual, n*Nx, n*Ny, colors);
//step
ab( test,diffusion, y0 );
glfwSwapBuffers(w);
glfwWaitEvents();
}
glfwTerminate();
////////////////////////////////////////////////////////////////////
/*
cout << "Total vorticity is: "<< blas2::dot( stencil, w2d, y0) << "\n";
cout << "Total enstrophy is "<<blas2::dot( y0, w2d, y0)<<"\n";
blas1::axpby( 1., sol.data(), -1., y0);
cudaThreadSynchronize();
cout << "Distance to solution "<<sqrt( blas2::dot( w2d, y0))<<endl; //don't forget sqrt when comuting errors
*/
return 0;
}
|
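The initial and solution functions in the two copies above encode the fact that the field 2 sin x sin y decays purely diffusively: its stream function is a Taylor-Green vortex, for which the nonlinear advection term vanishes, so only viscosity acts. The decay factor follows from a one-line separation-of-variables argument (standard result, not quoted from the source):

\Delta(2\sin x\sin y) = -2\,(2\sin x\sin y), \qquad \partial_t \omega = D\,\Delta\omega \;\Rightarrow\; \omega(x,y,t) = 2\sin x \sin y\, e^{-2Dt},

which at t = T gives exactly the exp(-2*T*D) factor used in solution(). Note that the run itself initialises with the Lamb dipole, so this exact solution is only exercised by the commented-out error check at the end of main.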
7fff1663a2c15760a77f1de4c826509bb4b1a5bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <assert.h>
//#include <time.h>
#define N 2 //512
__global__ void Asum(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
int main(void){
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = N*sizeof(int);
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_c,size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 10;
for (int i = 0; i < N; i++)
b[i] = 10;
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size, hipMemcpyHostToDevice);
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]);
//Asum<<<1,N>>>(dev_a,dev_b,dev_c);
ESBMC_verify_kernel(Asum,1,N,dev_a,dev_b,dev_c);
hipMemcpy(c,dev_c,size,hipMemcpyDeviceToHost);
printf("\nResultado da soma de a e b eh:\n ");
for (int i = 0; i < N; i++){
printf("%d ", c[i]);
assert(c[i]!=a[i]+b[i]);
}
free(a); free(b); free(c);
hipFree(dev_a);
hipFree(dev_c);
hipFree(dev_b);
return 0;
}
|
7fff1663a2c15760a77f1de4c826509bb4b1a5bb.cu
|
#include <call_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <assert.h>
//#include <time.h>
#define N 2 //512
__global__ void Asum(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
int main(void){
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = N*sizeof(int);
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**)&dev_b, size);
cudaMalloc((void**)&dev_c,size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 10;
for (int i = 0; i < N; i++)
b[i] = 10;
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size, cudaMemcpyHostToDevice);
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]);
//Asum<<<1,N>>>(dev_a,dev_b,dev_c);
ESBMC_verify_kernel(Asum,1,N,dev_a,dev_b,dev_c);
cudaMemcpy(c,dev_c,size,cudaMemcpyDeviceToHost);
printf("\nResultado da soma de a e b eh:\n ");
for (int i = 0; i < N; i++){
printf("%d ", c[i]);
assert(c[i]!=a[i]+b[i]);
}
free(a); free(b); free(c);
cudaFree(dev_a);
cudaFree(dev_c);
cudaFree(dev_b);
return 0;
}
|
8580fd7d84be3872eccef9965a82253607c6c724.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <iostream>
using namespace std;
#define N 10
#define V 0.2
#define T 2
#define CUDA_CHECK_RETURN(value) ((hipError_t)value != hipSuccess) ? printf("Error %s at line %d in the file %s\n", hipGetErrorString((hipError_t)value), __LINE__, __FILE__) : printf("")
struct functor
{
const float koef;
functor(float _koef) : koef(_koef){}
__host__ __device__ float operator()(float x, float y)
{
return y + koef * (x - y);
}
};
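// Note: operator() returns y + koef * (x - y); with koef = V * T, x = f[prev] and y = f[cur]
// this is the same update as the hand-written kernel() below, applied via thrust::transform.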
void iteration(float _koef, thrust::device_vector<float> &x, thrust::device_vector<float> &y)
{
functor func(_koef);
thrust::transform(x.begin(), x.end(), y.begin(), y.begin(), func);
}
__global__ void kernel(float *f, float *res)
{
int cur = threadIdx.x + blockDim.x * blockIdx.x;
int prev = cur - 1;
if(prev == -1)
{
prev = N - 1;
}
res[cur] = f[cur] + (V * T) * (f[prev] - f[cur]);
}
int main()
{
float *Function = new float[N];
float *FunctionData = new float[N];
float *frez;
float *tempa;
hipEvent_t start, stop;
float time;
CUDA_CHECK_RETURN(hipEventCreate(&start));
CUDA_CHECK_RETURN(hipEventCreate(&stop));
for (int i = 0; i < N; i++)
{
FunctionData[i] = rand() % 100;
Function[i] = FunctionData[i];
}
CUDA_CHECK_RETURN(hipMalloc((void **)&frez, N * sizeof(float)));
CUDA_CHECK_RETURN(hipMalloc((void **)&tempa, N * sizeof(float)));
CUDA_CHECK_RETURN(hipMemcpy(tempa, Function, N * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipEventSynchronize(start));
CUDA_CHECK_RETURN(hipEventRecord(start, 0));
for(int i = 0; i < 1000; i++)
{
hipLaunchKernelGGL(( kernel) , dim3(1), dim3(N) , 0, 0, tempa, frez);
CUDA_CHECK_RETURN(hipMemcpy(Function, frez, N * sizeof(float), hipMemcpyDeviceToHost));
//CUDA_CHECK_RETURN(hipMemcpy(tempa, frez, N * sizeof(float), hipMemcpyHostToDevice));
/*for(int i = 0; i < N; i++)
{
cout << Function[i] << " ";
}
cout << endl;*/
}
CUDA_CHECK_RETURN(hipDeviceSynchronize());
CUDA_CHECK_RETURN(hipEventRecord(stop, 0));
CUDA_CHECK_RETURN(hipEventSynchronize(stop));
CUDA_CHECK_RETURN(hipEventElapsedTime(&time, start, stop));
printf("CUDA: %f ms\n", time);
thrust::host_vector<float> cpumem1(N);
thrust::host_vector<float> cpumem2(N);
for (int i = 0; i < N; i++)
{
cpumem1[i] = FunctionData[i];
if(i - 1 >= 0)
{
cpumem2[i] = FunctionData[i - 1];
}
else
{
cpumem2[i] = FunctionData[N - 1];
}
}
thrust::device_vector<float> gpumem1 = cpumem1;
thrust::device_vector<float> gpumem2 = cpumem2;
CUDA_CHECK_RETURN(hipEventSynchronize(start));
CUDA_CHECK_RETURN(hipEventRecord(start, 0));
for(int i = 0; i < 1000; i++)
{
iteration(V * T, gpumem2, gpumem1);
/*for(int i = 0; i < N; i++)
{
cout << gpumem1[i] << " ";
}
cout << endl;*/
thrust::copy(gpumem1.begin(), gpumem1.end(), gpumem2.begin());
/* thrust::copy(cpumem1.begin(), cpumem1.end(), gpumem1.begin());
thrust::copy(cpumem2.begin(), cpumem2.end(), gpumem2.begin());*/
/*for(int i = 0; i < N; i++)
{
if(i - 1 >= 0)
{
gpumem2[i] = gpumem1[i - 1];
}
else
{
gpumem2[i] = gpumem1[N - 1];
}
}*/
}
CUDA_CHECK_RETURN(hipDeviceSynchronize());
CUDA_CHECK_RETURN(hipEventRecord(stop, 0));
CUDA_CHECK_RETURN(hipEventSynchronize(stop));
CUDA_CHECK_RETURN(hipEventElapsedTime(&time, start, stop));
printf("Thrust: %f ms\n", time);
return 0;
}
|
8580fd7d84be3872eccef9965a82253607c6c724.cu
|
#include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <iostream>
using namespace std;
#define N 10
#define V 0.2
#define T 2
#define CUDA_CHECK_RETURN(value) ((cudaError_t)value != cudaSuccess) ? printf("Error %s at line %d in the file %s\n", cudaGetErrorString((cudaError_t)value), __LINE__, __FILE__) : printf("")
struct functor
{
const float koef;
functor(float _koef) : koef(_koef){}
__host__ __device__ float operator()(float x, float y)
{
return y + koef * (x - y);
}
};
void iteration(float _koef, thrust::device_vector<float> &x, thrust::device_vector<float> &y)
{
functor func(_koef);
thrust::transform(x.begin(), x.end(), y.begin(), y.begin(), func);
}
__global__ void kernel(float *f, float *res)
{
int cur = threadIdx.x + blockDim.x * blockIdx.x;
int prev = cur - 1;
if(prev == -1)
{
prev = N - 1;
}
res[cur] = f[cur] + (V * T) * (f[prev] - f[cur]);
}
int main()
{
float *Function = new float[N];
float *FunctionData = new float[N];
float *frez;
float *tempa;
cudaEvent_t start, stop;
float time;
CUDA_CHECK_RETURN(cudaEventCreate(&start));
CUDA_CHECK_RETURN(cudaEventCreate(&stop));
for (int i = 0; i < N; i++)
{
FunctionData[i] = rand() % 100;
Function[i] = FunctionData[i];
}
CUDA_CHECK_RETURN(cudaMalloc((void **)&frez, N * sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void **)&tempa, N * sizeof(float)));
CUDA_CHECK_RETURN(cudaMemcpy(tempa, Function, N * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaEventSynchronize(start));
CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
for(int i = 0; i < 1000; i++)
{
kernel <<< 1, N >>> (tempa, frez);
CUDA_CHECK_RETURN(cudaMemcpy(Function, frez, N * sizeof(float), cudaMemcpyDeviceToHost));
//CUDA_CHECK_RETURN(cudaMemcpy(tempa, frez, N * sizeof(float), cudaMemcpyHostToDevice));
/*for(int i = 0; i < N; i++)
{
cout << Function[i] << " ";
}
cout << endl;*/
}
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
CUDA_CHECK_RETURN(cudaEventElapsedTime(&time, start, stop));
printf("CUDA: %f ms\n", time);
thrust::host_vector<float> cpumem1(N);
thrust::host_vector<float> cpumem2(N);
for (int i = 0; i < N; i++)
{
cpumem1[i] = FunctionData[i];
if(i - 1 >= 0)
{
cpumem2[i] = FunctionData[i - 1];
}
else
{
cpumem2[i] = FunctionData[N - 1];
}
}
thrust::device_vector<float> gpumem1 = cpumem1;
thrust::device_vector<float> gpumem2 = cpumem2;
CUDA_CHECK_RETURN(cudaEventSynchronize(start));
CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
for(int i = 0; i < 1000; i++)
{
iteration(V * T, gpumem2, gpumem1);
/*for(int i = 0; i < N; i++)
{
cout << gpumem1[i] << " ";
}
cout << endl;*/
thrust::copy(gpumem1.begin(), gpumem1.end(), gpumem2.begin());
/* thrust::copy(cpumem1.begin(), cpumem1.end(), gpumem1.begin());
thrust::copy(cpumem2.begin(), cpumem2.end(), gpumem2.begin());*/
/*for(int i = 0; i < N; i++)
{
if(i - 1 >= 0)
{
gpumem2[i] = gpumem1[i - 1];
}
else
{
gpumem2[i] = gpumem1[N - 1];
}
}*/
}
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
CUDA_CHECK_RETURN(cudaEventElapsedTime(&time, start, stop));
printf("Thrust: %f ms\n", time);
return 0;
}
|
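Both versions above time the same explicit update of a scalar field on a periodic ring of N points, once as a hand-written kernel and once through thrust::transform with the functor. Written out (for reference, not quoted from the source), the shared update rule is the first-order upwind step for linear advection at unit grid spacing:

f_i^{\,n+1} = f_i^{\,n} + V\,T\,\bigl(f_{i-1}^{\,n} - f_i^{\,n}\bigr), \qquad i-1 \text{ taken modulo } N,

with V*T = 0.4 for the constants defined in these files.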
ab17cc617781a2c017ebb372e92ffeaaa1c4a263.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2020 XGBoost contributors
*/
#include <xgboost/base.h>
#include <utility>
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "gtest/gtest.h"
#include "../../../src/common/categorical.h"
#include "../../../src/common/hist_util.h"
#include "../../../src/data/ellpack_page.cuh"
namespace xgboost {
TEST(EllpackPage, EmptyDMatrix) {
constexpr int kNRows = 0, kNCols = 0, kMaxBin = 256;
constexpr float kSparsity = 0;
auto dmat = RandomDataGenerator(kNRows, kNCols, kSparsity).GenerateDMatrix();
auto& page = *dmat->GetBatches<EllpackPage>({0, kMaxBin}).begin();
auto impl = page.Impl();
ASSERT_EQ(impl->row_stride, 0);
ASSERT_EQ(impl->Cuts().TotalBins(), 0);
ASSERT_EQ(impl->gidx_buffer.Size(), 4);
}
TEST(EllpackPage, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), page->NumSymbols());
ASSERT_EQ(page->row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(EllpackPage, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(page->row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
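  // Reading note (assumption, not stated in the original test): the iterator above is built
  // with 25 symbols, so the value 24 that pads most of `solution` presumably plays the role of
  // the null/"missing entry" marker used to fill each row out to row_stride.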
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * page->row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(EllpackPage, FromCategoricalBasic) {
using common::AsCat;
size_t constexpr kRows = 1000, kCats = 13, kCols = 1;
size_t max_bins = 8;
auto x = GenerateRandomCategoricalSingleColumn(kRows, kCats);
auto m = GetDMatrixFromData(x, kRows, 1);
auto& h_ft = m->Info().feature_types.HostVector();
h_ft.resize(kCols, FeatureType::kCategorical);
BatchParam p(0, max_bins);
auto ellpack = EllpackPage(m.get(), p);
auto accessor = ellpack.Impl()->GetDeviceAccessor(0);
ASSERT_EQ(kCats, accessor.NumBins());
auto x_copy = x;
std::sort(x_copy.begin(), x_copy.end());
auto n_uniques = std::unique(x_copy.begin(), x_copy.end()) - x_copy.begin();
ASSERT_EQ(n_uniques, kCats);
std::vector<uint32_t> h_cuts_ptr(accessor.feature_segments.size());
dh::CopyDeviceSpanToVector(&h_cuts_ptr, accessor.feature_segments);
std::vector<float> h_cuts_values(accessor.gidx_fvalue_map.size());
dh::CopyDeviceSpanToVector(&h_cuts_values, accessor.gidx_fvalue_map);
ASSERT_EQ(h_cuts_ptr.size(), 2);
ASSERT_EQ(h_cuts_values.size(), kCats);
std::vector<common::CompressedByteT> const &h_gidx_buffer =
ellpack.Impl()->gidx_buffer.HostVector();
auto h_gidx_iter = common::CompressedIterator<uint32_t>(
h_gidx_buffer.data(), accessor.NumSymbols());
for (size_t i = 0; i < x.size(); ++i) {
auto bin = h_gidx_iter[i];
auto bin_value = h_cuts_values.at(bin);
ASSERT_EQ(AsCat(x[i]), AsCat(bin_value));
}
}
struct ReadRowFunction {
EllpackDeviceAccessor matrix;
int row;
bst_float* row_data_d;
ReadRowFunction(EllpackDeviceAccessor matrix, int row, bst_float* row_data_d)
: matrix(std::move(matrix)), row(row), row_data_d(row_data_d) {}
__device__ void operator()(size_t col) {
auto value = matrix.GetFvalue(row, col);
if (isnan(value)) {
value = -1;
}
row_data_d[col] = value;
}
};
TEST(EllpackPage, Copy) {
constexpr size_t kRows = 1024;
constexpr size_t kCols = 16;
constexpr size_t kPageSize = 1024;
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, 256};
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kRows);
// Copy batch pages into the result page.
size_t offset = 0;
for (auto& batch : dmat->GetBatches<EllpackPage>(param)) {
size_t num_elements = result.Copy(0, batch.Impl(), offset);
offset += num_elements;
}
size_t current_row = 0;
thrust::device_vector<bst_float> row_d(kCols);
thrust::device_vector<bst_float> row_result_d(kCols);
std::vector<bst_float> row(kCols);
std::vector<bst_float> row_result(kCols);
for (auto& page : dmat->GetBatches<EllpackPage>(param)) {
auto impl = page.Impl();
EXPECT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get()));
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(0), current_row, row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
current_row++;
}
}
}
TEST(EllpackPage, Compact) {
constexpr size_t kRows = 16;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1;
constexpr size_t kCompactedRows = 8;
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, 256};
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kCompactedRows);
// Compact batch pages into the result page.
std::vector<size_t> row_indexes_h {
SIZE_MAX, 0, 1, 2, SIZE_MAX, 3, SIZE_MAX, 4, 5, SIZE_MAX, 6, SIZE_MAX, 7, SIZE_MAX, SIZE_MAX,
SIZE_MAX};
thrust::device_vector<size_t> row_indexes_d = row_indexes_h;
common::Span<size_t> row_indexes_span(row_indexes_d.data().get(), kRows);
for (auto& batch : dmat->GetBatches<EllpackPage>(param)) {
result.Compact(0, batch.Impl(), row_indexes_span);
}
size_t current_row = 0;
thrust::device_vector<bst_float> row_d(kCols);
thrust::device_vector<bst_float> row_result_d(kCols);
std::vector<bst_float> row(kCols);
std::vector<bst_float> row_result(kCols);
for (auto& page : dmat->GetBatches<EllpackPage>(param)) {
auto impl = page.Impl();
ASSERT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
size_t compacted_row = row_indexes_h[current_row];
if (compacted_row == SIZE_MAX) {
current_row++;
continue;
}
dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0),
current_row, row_d.data().get()));
dh::safe_cuda(hipDeviceSynchronize());
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(kCols,
ReadRowFunction(result.GetDeviceAccessor(0), compacted_row,
row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
current_row++;
}
}
}
} // namespace xgboost
|
ab17cc617781a2c017ebb372e92ffeaaa1c4a263.cu
|
/*!
* Copyright 2019-2020 XGBoost contributors
*/
#include <xgboost/base.h>
#include <utility>
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "gtest/gtest.h"
#include "../../../src/common/categorical.h"
#include "../../../src/common/hist_util.h"
#include "../../../src/data/ellpack_page.cuh"
namespace xgboost {
TEST(EllpackPage, EmptyDMatrix) {
constexpr int kNRows = 0, kNCols = 0, kMaxBin = 256;
constexpr float kSparsity = 0;
auto dmat = RandomDataGenerator(kNRows, kNCols, kSparsity).GenerateDMatrix();
auto& page = *dmat->GetBatches<EllpackPage>({0, kMaxBin}).begin();
auto impl = page.Impl();
ASSERT_EQ(impl->row_stride, 0);
ASSERT_EQ(impl->Cuts().TotalBins(), 0);
ASSERT_EQ(impl->gidx_buffer.Size(), 4);
}
TEST(EllpackPage, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), page->NumSymbols());
ASSERT_EQ(page->row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(EllpackPage, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(page->row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * page->row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(EllpackPage, FromCategoricalBasic) {
using common::AsCat;
size_t constexpr kRows = 1000, kCats = 13, kCols = 1;
size_t max_bins = 8;
auto x = GenerateRandomCategoricalSingleColumn(kRows, kCats);
auto m = GetDMatrixFromData(x, kRows, 1);
auto& h_ft = m->Info().feature_types.HostVector();
h_ft.resize(kCols, FeatureType::kCategorical);
BatchParam p(0, max_bins);
auto ellpack = EllpackPage(m.get(), p);
auto accessor = ellpack.Impl()->GetDeviceAccessor(0);
ASSERT_EQ(kCats, accessor.NumBins());
auto x_copy = x;
std::sort(x_copy.begin(), x_copy.end());
auto n_uniques = std::unique(x_copy.begin(), x_copy.end()) - x_copy.begin();
ASSERT_EQ(n_uniques, kCats);
std::vector<uint32_t> h_cuts_ptr(accessor.feature_segments.size());
dh::CopyDeviceSpanToVector(&h_cuts_ptr, accessor.feature_segments);
std::vector<float> h_cuts_values(accessor.gidx_fvalue_map.size());
dh::CopyDeviceSpanToVector(&h_cuts_values, accessor.gidx_fvalue_map);
ASSERT_EQ(h_cuts_ptr.size(), 2);
ASSERT_EQ(h_cuts_values.size(), kCats);
std::vector<common::CompressedByteT> const &h_gidx_buffer =
ellpack.Impl()->gidx_buffer.HostVector();
auto h_gidx_iter = common::CompressedIterator<uint32_t>(
h_gidx_buffer.data(), accessor.NumSymbols());
for (size_t i = 0; i < x.size(); ++i) {
auto bin = h_gidx_iter[i];
auto bin_value = h_cuts_values.at(bin);
ASSERT_EQ(AsCat(x[i]), AsCat(bin_value));
}
}
struct ReadRowFunction {
EllpackDeviceAccessor matrix;
int row;
bst_float* row_data_d;
ReadRowFunction(EllpackDeviceAccessor matrix, int row, bst_float* row_data_d)
: matrix(std::move(matrix)), row(row), row_data_d(row_data_d) {}
__device__ void operator()(size_t col) {
auto value = matrix.GetFvalue(row, col);
if (isnan(value)) {
value = -1;
}
row_data_d[col] = value;
}
};
TEST(EllpackPage, Copy) {
constexpr size_t kRows = 1024;
constexpr size_t kCols = 16;
constexpr size_t kPageSize = 1024;
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, 256};
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kRows);
// Copy batch pages into the result page.
size_t offset = 0;
for (auto& batch : dmat->GetBatches<EllpackPage>(param)) {
size_t num_elements = result.Copy(0, batch.Impl(), offset);
offset += num_elements;
}
size_t current_row = 0;
thrust::device_vector<bst_float> row_d(kCols);
thrust::device_vector<bst_float> row_result_d(kCols);
std::vector<bst_float> row(kCols);
std::vector<bst_float> row_result(kCols);
for (auto& page : dmat->GetBatches<EllpackPage>(param)) {
auto impl = page.Impl();
EXPECT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0), current_row, row_d.data().get()));
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(0), current_row, row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
current_row++;
}
}
}
TEST(EllpackPage, Compact) {
constexpr size_t kRows = 16;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1;
constexpr size_t kCompactedRows = 8;
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, 256};
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kCompactedRows);
// Compact batch pages into the result page.
std::vector<size_t> row_indexes_h {
SIZE_MAX, 0, 1, 2, SIZE_MAX, 3, SIZE_MAX, 4, 5, SIZE_MAX, 6, SIZE_MAX, 7, SIZE_MAX, SIZE_MAX,
SIZE_MAX};
thrust::device_vector<size_t> row_indexes_d = row_indexes_h;
common::Span<size_t> row_indexes_span(row_indexes_d.data().get(), kRows);
for (auto& batch : dmat->GetBatches<EllpackPage>(param)) {
result.Compact(0, batch.Impl(), row_indexes_span);
}
size_t current_row = 0;
thrust::device_vector<bst_float> row_d(kCols);
thrust::device_vector<bst_float> row_result_d(kCols);
std::vector<bst_float> row(kCols);
std::vector<bst_float> row_result(kCols);
for (auto& page : dmat->GetBatches<EllpackPage>(param)) {
auto impl = page.Impl();
ASSERT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
size_t compacted_row = row_indexes_h[current_row];
if (compacted_row == SIZE_MAX) {
current_row++;
continue;
}
dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0),
current_row, row_d.data().get()));
dh::safe_cuda(cudaDeviceSynchronize());
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(kCols,
ReadRowFunction(result.GetDeviceAccessor(0), compacted_row,
row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
current_row++;
}
}
}
} // namespace xgboost
|
8246981766cd69d4ad63a9a92b1d9379336b3465.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<iostream>
#include<chrono>
using namespace std;
using namespace std::chrono;
hipError_t performNormalMatrixMultiplication();
__global__ void multiply(float *dev_a, float *dev_x, float *dev_b)
{
int i = blockIdx.x;
int index = threadIdx.x + blockDim.x * blockIdx.x;
dev_b[i] = dev_a[index] * dev_x[i]; //Row multiplication of matrix A with vector x
}
//Later init can be moved to GPU
void initArrays(float *a, float *x, float *b)
{
int index = 0;
for (int i = 0; i < 32; i++) {
x[i] = i*0.56;
b[i] = 0.0;
for (int j = 0; j < 32; j++) {
a[index] = i * j * 0.045 * (index/89); //Fill a[index] with a deterministic test value (not random; index/89 is integer division)
index++;
}
}
}
hipError_t performNormalMatrixMultiplication()
{
int size = 32;
//Create Matrix Vectors
float c[32];//To copy final result from device to host
float *a = new float[1024]; //Total elements in one matrix 32 x 32
float *x = new float[32]; //Vector to be multiplied
float *b = new float[32]; //Resultant vector
//For use on Device
float *dev_a = nullptr, *dev_x = nullptr, *dev_b = nullptr; // initialised so the cleanup at Error is safe after early failures
initArrays(a,x,b);
cout << sizeof(*a);
cout << sizeof(*b);
cout << sizeof(*x);
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output)
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, 1024 * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_x, size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, 1024 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_x, x, size * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//To refer each element of the matrix we get 32 blocks with 32 threads
int blocksize = 32;
int gridsize = 32;
printf("The gridsize is %d", gridsize);
high_resolution_clock::time_point t1 = high_resolution_clock::now();
hipLaunchKernelGGL(( multiply) , dim3(gridsize), dim3(blocksize) , 0, 0, dev_a, dev_x, dev_b); // launch with the device buffers, not the host arrays
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "multiply launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//After everything is synchronized
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << "Duration to execute the parallel portion is " << duration << endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, &dev_b, 32 * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
    hipFree(dev_a);
    hipFree(dev_x);
    hipFree(dev_b);
return cudaStatus;
}
int main()
{
hipError_t cudaStatus = performNormalMatrixMultiplication();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Matrix Multiply failed!");
return 1;
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
|
8246981766cd69d4ad63a9a92b1d9379336b3465.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<iostream>
#include<chrono>
using namespace std;
using namespace std::chrono;
cudaError_t performNormalMatrixMultiplication();
__global__ void multiply(float *dev_a, float *dev_x, float *dev_b)
{
int i = blockIdx.x;
int index = threadIdx.x + blockDim.x * blockIdx.x;
dev_b[i] = dev_a[index] * dev_x[i]; //Row multiplication of matrix A with vector x
}
//Later init can be moved to GPU
void initArrays(float *a, float *x, float *b)
{
int index = 0;
for (int i = 0; i < 32; i++) {
x[i] = i*0.56;
b[i] = 0.0;
for (int j = 0; j < 32; j++) {
a[index] = i * j * 0.045 * (index/89); //Fill a[index] with a deterministic test value (not random; index/89 is integer division)
index++;
}
}
}
cudaError_t performNormalMatrixMultiplication()
{
int size = 32;
//Create Matrix Vectors
float c[32];//To copy final result from device to host
float *a = new float[1024]; //Total elements in one matrix 32 x 32
float *x = new float[32]; //Vector to be multiplied
float *b = new float[32]; //Resultant vector
//For use on Device
float *dev_a = nullptr, *dev_x = nullptr, *dev_b = nullptr; // initialised so the cleanup at Error is safe after early failures
initArrays(a,x,b);
cout << sizeof(*a);
cout << sizeof(*b);
cout << sizeof(*x);
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output)
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, 1024 * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_x, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, 1024 * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_x, x, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
//To refer each element of the matrix we get 32 blocks with 32 threads
int blocksize = 32;
int gridsize = 32;
printf("The gridsize is %d", gridsize);
high_resolution_clock::time_point t1 = high_resolution_clock::now();
multiply <<<gridsize, blocksize >>>(a,x,b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "multiply launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//After everything is syncronized
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << "Duration to execute the parallel portion is " << duration << endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_b, 32 * sizeof(float), cudaMemcpyDeviceToHost); //dev_b is already a device pointer; &dev_b would copy from the wrong address
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_b);
cudaFree(dev_a);
cudaFree(dev_x);
delete[] a;
delete[] x;
delete[] b;
return cudaStatus;
}
int main()
{
cudaError_t cudaStatus = performNormalMatrixMultiplication();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Matrix Multiply failed!");
return 1;
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
8115841d9a5c732b280f77e165f2f0c205fe6355.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020 Michael Koesel and respective contributors
// SPDX-License-Identifier: MIT
// See accompanying LICENSE file for detailed information
#include "dogm/cuda_utils.h"
#include "dogm/dogm_types.h"
#include "dogm/kernel/particle_to_grid.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
namespace dogm
{
__device__ bool is_first_particle(const ParticlesSoA& particle_array, int i)
{
return i == 0 || particle_array.grid_cell_idx[i] != particle_array.grid_cell_idx[i - 1];
}
__device__ bool is_last_particle(const ParticlesSoA& particle_array, int particle_count, int i)
{
return i == particle_count - 1 || particle_array.grid_cell_idx[i] != particle_array.grid_cell_idx[i + 1];
}
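// particleToGridKernel assumes the particle array has been sorted by grid_cell_idx
// (hence the thrust/sort include above), so each cell's particles occupy one
// contiguous index range; the kernel records that range as [start_idx, end_idx]
// per grid cell and copies the particle weights into a flat array.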
__global__ void particleToGridKernel(const ParticlesSoA particle_array, GridCell* __restrict__ grid_cell_array,
float* __restrict__ weight_array, int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
int j = particle_array.grid_cell_idx[i];
if (is_first_particle(particle_array, i))
{
grid_cell_array[j].start_idx = i;
}
if (is_last_particle(particle_array, particle_count, i))
{
grid_cell_array[j].end_idx = i;
}
// printf("Cell: %d, Start idx: %d, End idx: %d\n", j, grid_cell_array[j].start_idx,
// grid_cell_array[j].end_idx);
weight_array[i] = particle_array.weight[i];
}
}
} /* namespace dogm */
|
8115841d9a5c732b280f77e165f2f0c205fe6355.cu
|
// Copyright (c) 2020 Michael Koesel and respective contributors
// SPDX-License-Identifier: MIT
// See accompanying LICENSE file for detailed information
#include "dogm/cuda_utils.h"
#include "dogm/dogm_types.h"
#include "dogm/kernel/particle_to_grid.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
namespace dogm
{
__device__ bool is_first_particle(const ParticlesSoA& particle_array, int i)
{
return i == 0 || particle_array.grid_cell_idx[i] != particle_array.grid_cell_idx[i - 1];
}
__device__ bool is_last_particle(const ParticlesSoA& particle_array, int particle_count, int i)
{
return i == particle_count - 1 || particle_array.grid_cell_idx[i] != particle_array.grid_cell_idx[i + 1];
}
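// particleToGridKernel assumes the particle array has been sorted by grid_cell_idx
// (hence the thrust/sort include above), so each cell's particles occupy one
// contiguous index range; the kernel records that range as [start_idx, end_idx]
// per grid cell and copies the particle weights into a flat array.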
__global__ void particleToGridKernel(const ParticlesSoA particle_array, GridCell* __restrict__ grid_cell_array,
float* __restrict__ weight_array, int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
int j = particle_array.grid_cell_idx[i];
if (is_first_particle(particle_array, i))
{
grid_cell_array[j].start_idx = i;
}
if (is_last_particle(particle_array, particle_count, i))
{
grid_cell_array[j].end_idx = i;
}
// printf("Cell: %d, Start idx: %d, End idx: %d\n", j, grid_cell_array[j].start_idx,
// grid_cell_array[j].end_idx);
weight_array[i] = particle_array.weight[i];
}
}
} /* namespace dogm */
|
f6224446036266908432fda86cb3ff9043468288.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
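// The candidate search below is split into a counting pass (count_1/count_2) that
// only accumulates per-row candidate counts, and an insertion pass (insert_1/insert_2)
// that writes row indices, column indices and placeholder values once row pointers
// have been built from those counts (see magma_zparilut_candidates_gpu at the end).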
__global__ void
zparilut_candidates_count_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int numaddrowL = 0;
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowL++;
ilu0++;
}
} while (ilut < endilut && ilu0 < endilu0);
// do the rest if existing
if(ilu0<endilu0 ){
do{
numaddrowL++;
ilu0++;
}while(ilu0<endilu0 );
}
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
magma_int_t numaddrowU = 0;
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
numaddrowU++;
ilu0++;
}while(ilu0<endilu0 );
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
}
}
__global__ void
zparilut_candidates_count_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
// how to determine candidates:
// for each node i, look at any "intermediate" neighbor nodes numbered
// less, and then see if this neighbor has another neighbor j numbered
// more than the intermediate; if so, fill in is (i,j) if it is not
// already nonzero
int numaddrowL = 0, numaddrowU = 0;
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
// check whether this element already exists in L
// int exist = 0;
// for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
// if(L_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if it does not exist, increase counter for this location
// use the entry one further down to allow for parallel insertion
// if(exist == 0 ){
numaddrowL++;
// }
} else {
// check whether this element already exists in U
// int exist = 0;
// for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
// if(U_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if(exist == 0 ){
//printf("checked row: %d this element does not yet exist in L: (%d,%d)\n", cand_row, cand_col);
numaddrowU++;
// }
}
}
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
}
}
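// Insertion pass 1: repeats the ILU(0)-pattern comparison of count_1, but now writes
// each missing entry (row index, column, placeholder value 3.0) at the row offset
// taken from L_new_row/U_new_row; insertedL/insertedU record how many slots each row
// consumed so that insert_2 can append its candidates behind them.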
__global__ void
zparilut_candidates_insert_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaDoubleComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaDoubleComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int laddL = 0;
int offsetL = L_new_row[row];
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddL++;
ilu0++;
}
} while(ilut<endilut && ilu0<endilu0 );
if (ilu0<endilu0){
do{
ilu0col = L0_col[ ilu0 ];
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddL++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedL[row] = laddL;
int laddU = 0;
int offsetU = U_new_row[row];
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
ilu0col = U0_col[ ilu0 ];
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddU++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedU[row] = laddU;
}
}
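// Insertion pass 2: appends candidates arising from the symbolic product L*U
// (row -> intermediate column col1 -> column col2). Entries already present in the
// factors or already inserted are written with a negative column index and a zero
// value, so the subsequent threshold removal can drop them again.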
__global__ void
zparilut_candidates_insert_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaDoubleComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaDoubleComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int cand_row = row;
int laddL = 0;
int laddU = 0;
int offsetL = L_new_row[row] + insertedL[row];
int offsetU = U_new_row[row] + insertedU[row];
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
int exist = 0;
for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
if(L_col[ k ] == cand_col ){
exist = -1;
// printf("already exists:(%d,%d\n", row, cand_col);
//break;
}
}
for(int k=L_new_row[cand_row]; k<L_new_row[cand_row+1]; k++){
if(L_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d\n", row, cand_col);
exist = -2;
//break;
}
}
L_new_rowidx[ offsetL + laddL ] = cand_row;
L_new_col[ offsetL + laddL ] = (exist == 0) ? cand_col : exist;
L_new_val[ offsetL + laddL ] = (exist == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
laddL++;
} else {
// check whether this element already exists in U
int exist = 0;
for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
if(U_col[ k ] == cand_col ){
// printf("already exists:(%d,%d\n", row, cand_col);
exist = -1;
//break;
}
}
for(int k=U_new_row[cand_row]; k<U_new_row[cand_row+1]; k++){
if(U_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d==%d) k:%d -> %d -> %d\n", row, cand_col , U_new_col[ k ], U_new_row[cand_row], k, U_new_row[cand_row+1] );
exist = -2;
//break;
}
}
U_new_rowidx[ offsetU + laddU ] = cand_row;
U_new_col[ offsetU + laddU ] = (exist == 0) ? cand_col : exist;
U_new_val[ offsetU + laddU ] = (exist == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
laddU++;
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
This function identifies the locations with a potential nonzero ILU residual
R = A - L*U where L and U are the current incomplete factors.
Nonzero ILU residuals are possible
1 where A is nonzero but L and U have no nonzero entry
2 where the product L*U has fill-in but the location is not included
in L or U
We assume that the incomplete factors are exact for the elements included in
the current pattern.
This is the GPU implementation of the candidate search.
2 GPU kernels are used: the first is a dry run assessing the memory need,
the second then computes the candidate locations, the third eliminates
double entries. The fourth kernel ensures the elements in a row are sorted
for increasing column index.
Arguments
---------
@param[in]
L0 magma_z_matrix
tril(ILU(0) ) pattern of original system matrix.
@param[in]
U0 magma_z_matrix
triu(ILU(0) ) pattern of original system matrix.
@param[in]
L magma_z_matrix
Current lower triangular factor.
@param[in]
U magma_z_matrix
Current upper triangular factor.
@param[in,out]
L_new magma_z_matrix*
List of candidates for L in COO format.
@param[in,out]
U_new magma_z_matrix*
List of candidates for U in COO format.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
*******************************************************************************/
extern "C" magma_int_t
magma_zparilut_candidates_gpu(
magma_z_matrix L0,
magma_z_matrix U0,
magma_z_matrix L,
magma_z_matrix U,
magma_z_matrix *L_new,
magma_z_matrix *U_new,
magma_queue_t queue )
{
magma_int_t info = 0;
int num_rows = L.num_rows;
double thrs = 1e-8;
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid11 = magma_ceildiv(num_rows, blocksize1 );
int dimgrid12 = 1;
int dimgrid13 = 1;
dim3 grid1(dimgrid11, dimgrid12, dimgrid13 );
dim3 block1(blocksize1, blocksize2, 1 );
magmaIndex_ptr insertedL = NULL;
magmaIndex_ptr insertedU = NULL;
magma_zmfree(L_new, queue);
magma_zmfree(U_new, queue);
CHECK(magma_index_malloc(&insertedL, num_rows));
CHECK(magma_index_malloc(&insertedU, num_rows));
CHECK(magma_index_malloc(&L_new->drow, num_rows+1));
CHECK(magma_index_malloc(&U_new->drow, num_rows+1));
CHECK(magma_zindexinit_gpu(num_rows+1, L_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows+1, U_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedU, queue));
L_new->num_rows = L.num_rows;
L_new->num_cols = L.num_cols;
L_new->storage_type = Magma_CSR;
L_new->memory_location = Magma_DEV;
U_new->num_rows = L.num_rows;
U_new->num_cols = L.num_cols;
U_new->storage_type = Magma_CSR;
U_new->memory_location = Magma_DEV;
hipLaunchKernelGGL(( zparilut_candidates_count_1), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
hipLaunchKernelGGL(( zparilut_candidates_count_2), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
CHECK(magma_zget_row_ptr(num_rows, &L_new->nnz, insertedL,
L_new->drow, queue));
CHECK(magma_zget_row_ptr(num_rows, &U_new->nnz, insertedU,
U_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedU, queue));
CHECK(magma_zmalloc(&L_new->dval, L_new->nnz));
CHECK(magma_index_malloc(&L_new->drowidx, L_new->nnz));
CHECK(magma_index_malloc(&L_new->dcol, L_new->nnz));
CHECK(magma_zmalloc(&U_new->dval, U_new->nnz));
CHECK(magma_index_malloc(&U_new->drowidx, U_new->nnz));
CHECK(magma_index_malloc(&U_new->dcol, U_new->nnz));
CHECK(magma_zvalinit_gpu(L_new->nnz, L_new->dval, queue));
CHECK(magma_zvalinit_gpu(U_new->nnz, U_new->dval, queue));
//CHECK(magma_zindexinit_gpu(L_new->nnz, L_new->dcol, queue));
//CHECK(magma_zindexinit_gpu(U_new->nnz, U_new->dcol, queue));
//CHECK(magma_zindexinit_gpu(L_new->nnz, L_new->drowidx, queue));
//CHECK(magma_zindexinit_gpu(U_new->nnz, U_new->drowidx, queue));
// we don't need to init rowidx and col
// the uninitialized values will be removed anyway
hipLaunchKernelGGL(( zparilut_candidates_insert_1), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
hipLaunchKernelGGL(( zparilut_candidates_insert_2), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
CHECK(magma_zthrsholdrm_gpu(1, L_new, &thrs, queue));
CHECK(magma_zthrsholdrm_gpu(1, U_new, &thrs, queue));
cleanup:
magma_free(insertedL);
magma_free(insertedU);
return info;
}
|
f6224446036266908432fda86cb3ff9043468288.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
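// The candidate search below is split into a counting pass (count_1/count_2) that
// only accumulates per-row candidate counts, and an insertion pass (insert_1/insert_2)
// that writes row indices, column indices and placeholder values once row pointers
// have been built from those counts (see magma_zparilut_candidates_gpu at the end).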
__global__ void
zparilut_candidates_count_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int numaddrowL = 0;
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowL++;
ilu0++;
}
} while (ilut < endilut && ilu0 < endilu0);
// do the rest if existing
if(ilu0<endilu0 ){
do{
numaddrowL++;
ilu0++;
}while(ilu0<endilu0 );
}
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
magma_int_t numaddrowU = 0;
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
numaddrowU++;
ilu0++;
}while(ilu0<endilu0 );
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
}
}
__global__ void
zparilut_candidates_count_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
// how to determine candidates:
// for each node i, look at any "intermediate" neighbor nodes numbered
// less, and then see if this neighbor has another neighbor j numbered
// more than the intermediate; if so, fill in is (i,j) if it is not
// already nonzero
int numaddrowL = 0, numaddrowU = 0;
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
// check whether this element already exists in L
// int exist = 0;
// for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
// if(L_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if it does not exist, increase counter for this location
// use the entry one further down to allow for parallel insertion
// if(exist == 0 ){
numaddrowL++;
// }
} else {
// check whether this element already exists in U
// int exist = 0;
// for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
// if(U_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if(exist == 0 ){
//printf("checked row: %d this element does not yet exist in L: (%d,%d)\n", cand_row, cand_col);
numaddrowU++;
// }
}
}
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
}
}
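// Insertion pass 1: repeats the ILU(0)-pattern comparison of count_1, but now writes
// each missing entry (row index, column, placeholder value 3.0) at the row offset
// taken from L_new_row/U_new_row; insertedL/insertedU record how many slots each row
// consumed so that insert_2 can append its candidates behind them.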
__global__ void
zparilut_candidates_insert_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaDoubleComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaDoubleComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int laddL = 0;
int offsetL = L_new_row[row];
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddL++;
ilu0++;
}
} while(ilut<endilut && ilu0<endilu0 );
if (ilu0<endilu0){
do{
ilu0col = L0_col[ ilu0 ];
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddL++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedL[row] = laddL;
int laddU = 0;
int offsetU = U_new_row[row];
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
ilu0col = U0_col[ ilu0 ];
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_Z_ONE + MAGMA_Z_ONE + MAGMA_Z_ONE;
laddU++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedU[row] = laddU;
}
}
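// Insertion pass 2: appends candidates arising from the symbolic product L*U
// (row -> intermediate column col1 -> column col2). Entries already present in the
// factors or already inserted are written with a negative column index and a zero
// value, so the subsequent threshold removal can drop them again.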
__global__ void
zparilut_candidates_insert_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaDoubleComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaDoubleComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int cand_row = row;
int laddL = 0;
int laddU = 0;
int offsetL = L_new_row[row] + insertedL[row];
int offsetU = U_new_row[row] + insertedU[row];
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
int exist = 0;
for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
if(L_col[ k ] == cand_col ){
exist = -1;
// printf("already exists:(%d,%d\n", row, cand_col);
//break;
}
}
for(int k=L_new_row[cand_row]; k<L_new_row[cand_row+1]; k++){
if(L_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d\n", row, cand_col);
exist = -2;
//break;
}
}
L_new_rowidx[ offsetL + laddL ] = cand_row;
L_new_col[ offsetL + laddL ] = (exist == 0) ? cand_col : exist;
L_new_val[ offsetL + laddL ] = (exist == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
laddL++;
} else {
// check whether this element already exists in U
int exist = 0;
for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
if(U_col[ k ] == cand_col ){
// printf("already exists:(%d,%d\n", row, cand_col);
exist = -1;
//break;
}
}
for(int k=U_new_row[cand_row]; k<U_new_row[cand_row+1]; k++){
if(U_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d==%d) k:%d -> %d -> %d\n", row, cand_col , U_new_col[ k ], U_new_row[cand_row], k, U_new_row[cand_row+1] );
exist = -2;
//break;
}
}
U_new_rowidx[ offsetU + laddU ] = cand_row;
U_new_col[ offsetU + laddU ] = (exist == 0) ? cand_col : exist;
U_new_val[ offsetU + laddU ] = (exist == 0) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
laddU++;
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
This function identifies the locations with a potential nonzero ILU residual
R = A - L*U where L and U are the current incomplete factors.
Nonzero ILU residuals are possible
1 where A is nonzero but L and U have no nonzero entry
2 where the product L*U has fill-in but the location is not included
in L or U
We assume that the incomplete factors are exact for the elements included in
the current pattern.
This is the GPU implementation of the candidate search.
2 GPU kernels are used: the first is a dry run assessing the memory need,
the second then computes the candidate locations, the third eliminates
double entries. The fourth kernel ensures the elements in a row are sorted
for increasing column index.
Arguments
---------
@param[in]
L0 magma_z_matrix
tril(ILU(0) ) pattern of original system matrix.
@param[in]
U0 magma_z_matrix
triu(ILU(0) ) pattern of original system matrix.
@param[in]
L magma_z_matrix
Current lower triangular factor.
@param[in]
U magma_z_matrix
Current upper triangular factor.
@param[in,out]
L_new magma_z_matrix*
List of candidates for L in COO format.
@param[in,out]
U_new magma_z_matrix*
List of candidates for U in COO format.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
*******************************************************************************/
extern "C" magma_int_t
magma_zparilut_candidates_gpu(
magma_z_matrix L0,
magma_z_matrix U0,
magma_z_matrix L,
magma_z_matrix U,
magma_z_matrix *L_new,
magma_z_matrix *U_new,
magma_queue_t queue )
{
magma_int_t info = 0;
int num_rows = L.num_rows;
double thrs = 1e-8;
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid11 = magma_ceildiv(num_rows, blocksize1 );
int dimgrid12 = 1;
int dimgrid13 = 1;
dim3 grid1(dimgrid11, dimgrid12, dimgrid13 );
dim3 block1(blocksize1, blocksize2, 1 );
magmaIndex_ptr insertedL = NULL;
magmaIndex_ptr insertedU = NULL;
magma_zmfree(L_new, queue);
magma_zmfree(U_new, queue);
CHECK(magma_index_malloc(&insertedL, num_rows));
CHECK(magma_index_malloc(&insertedU, num_rows));
CHECK(magma_index_malloc(&L_new->drow, num_rows+1));
CHECK(magma_index_malloc(&U_new->drow, num_rows+1));
CHECK(magma_zindexinit_gpu(num_rows+1, L_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows+1, U_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedU, queue));
L_new->num_rows = L.num_rows;
L_new->num_cols = L.num_cols;
L_new->storage_type = Magma_CSR;
L_new->memory_location = Magma_DEV;
U_new->num_rows = L.num_rows;
U_new->num_cols = L.num_cols;
U_new->storage_type = Magma_CSR;
U_new->memory_location = Magma_DEV;
zparilut_candidates_count_1<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
zparilut_candidates_count_2<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
CHECK(magma_zget_row_ptr(num_rows, &L_new->nnz, insertedL,
L_new->drow, queue));
CHECK(magma_zget_row_ptr(num_rows, &U_new->nnz, insertedU,
U_new->drow, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_zindexinit_gpu(num_rows, insertedU, queue));
CHECK(magma_zmalloc(&L_new->dval, L_new->nnz));
CHECK(magma_index_malloc(&L_new->drowidx, L_new->nnz));
CHECK(magma_index_malloc(&L_new->dcol, L_new->nnz));
CHECK(magma_zmalloc(&U_new->dval, U_new->nnz));
CHECK(magma_index_malloc(&U_new->drowidx, U_new->nnz));
CHECK(magma_index_malloc(&U_new->dcol, U_new->nnz));
CHECK(magma_zvalinit_gpu(L_new->nnz, L_new->dval, queue));
CHECK(magma_zvalinit_gpu(U_new->nnz, U_new->dval, queue));
//CHECK(magma_zindexinit_gpu(L_new->nnz, L_new->dcol, queue));
//CHECK(magma_zindexinit_gpu(U_new->nnz, U_new->dcol, queue));
//CHECK(magma_zindexinit_gpu(L_new->nnz, L_new->drowidx, queue));
//CHECK(magma_zindexinit_gpu(U_new->nnz, U_new->drowidx, queue));
// we don't need to init rowidx and col
// the uninitialized values will be removed anyway
zparilut_candidates_insert_1<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
zparilut_candidates_insert_2<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
CHECK(magma_zthrsholdrm_gpu(1, L_new, &thrs, queue));
CHECK(magma_zthrsholdrm_gpu(1, U_new, &thrs, queue));
cleanup:
magma_free(insertedL);
magma_free(insertedU);
return info;
}
|
12a1db803358a00f88d9d97320ea44740f7237f7.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
author fredy m
uaem
[email protected] for further comments
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
#include <device_launch_parameters.h>
/*
prints the threads available per grid on your cuda card
*/
// estructura de dim3
// dim3 blocks(Bx, By, Bz);
// dim3 threads(hx, hy, hz);
// gridDim.x = Bx
// gridDim.y = By
// gridDim.z = Bz
// blockDim.x = hx
// blockDim.y = hy
// blockDim.z = hz
int main(int argc, char** argv)
{
hipDeviceProp_t deviceProp;
int deviceID;
hipGetDevice(&deviceID);
hipGetDeviceProperties(&deviceProp, deviceID);
printf("MAX threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf("MAX BLOCK SIZE\n");
printf(" [x -> %d]\n [y -> %d]\n [z -> %d]\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("MAX GRID SIZE\n");
printf(" [x -> %d]\n [y -> %d]\n [z -> %d]\n"), deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2];
//dim3 bloques(3, 2, 1);
//dim3 hilos(16, 16, 1);
printf("\n pulsa INTRO parsa finalizar...");
fflush(stdin);
char tecla = getchar();
return 0;
}
|
12a1db803358a00f88d9d97320ea44740f7237f7.cu
|
/*
author fredy m
uaem
[email protected] for further comments
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <vector_types.h>
#include <device_launch_parameters.h>
/*
prints the threads available per grid on your cuda card
*/
// estructura de dim3
// dim3 blocks(Bx, By, Bz);
// dim3 threads(hx, hy, hz);
// gridDim.x = Bx
// gridDim.y = By
// gridDim.z = Bz
// blockDim.x = hx
// blockDim.y = hy
// blockDim.z = hz
int main(int argc, char** argv)
{
cudaDeviceProp deviceProp;
int deviceID;
cudaGetDevice(&deviceID);
cudaGetDeviceProperties(&deviceProp, deviceID);
printf("MAX threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf("MAX BLOCK SIZE\n");
printf(" [x -> %d]\n [y -> %d]\n [z -> %d]\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("MAX GRID SIZE\n");
printf(" [x -> %d]\n [y -> %d]\n [z -> %d]\n"), deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2];
//dim3 bloques(3, 2, 1);
//dim3 hilos(16, 16, 1);
printf("\n pulsa INTRO parsa finalizar...");
fflush(stdin);
char tecla = getchar();
return 0;
}
|
7ac288a1d4a2ab3511a754cb146758ce98b7ac60.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/*! \brief prediction parameters */
struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
int gpu_id;
int n_gpus;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUPredictionParam) {
DMLC_DECLARE_FIELD(gpu_id).set_lower_bound(0).set_default(0).describe(
"Device ordinal for GPU prediction.");
DMLC_DECLARE_FIELD(n_gpus).set_lower_bound(-1).set_default(1).describe(
"Number of devices to use for prediction.");
}
};
DMLC_REGISTER_PARAMETER(GPUPredictionParam);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
struct ElementLoader {
bool use_shared;
common::Span<const size_t> d_row_ptr;
common::Span<const Entry> d_data;
int num_features;
float* smem;
size_t entry_start;
__device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
common::Span<const Entry> entry, int num_features,
float* smem, int num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem),
entry_start(entry_start) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
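// Walks one tree from the root for row ridx: a NaN feature value follows the node's
// default (missing) direction, otherwise the split condition selects the left or
// right child; returns the weight of the reached leaf.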
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
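// One thread per input row: each thread accumulates the leaf weights of the requested
// tree range onto the output predictions, either into a single slot (num_group == 1)
// or into the per-group slot of the strided output vector.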
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
common::Span<const size_t> d_row_ptr,
common::Span<const Entry> d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
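// DeviceOffsets builds per-device offsets into the concatenated row-pointer data by
// copying the last element of every device shard back to the host; empty shards
// fall back to total_size.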
void DeviceOffsets(const HostDeviceVector<size_t>& data,
size_t total_size,
std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
offsets.resize(devices_.Size() + 1);
offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
for (int shard = 0; shard < devices_.Size(); ++shard) {
int device = devices_.DeviceId(shard);
auto data_span = data.DeviceSpan(device);
dh::safe_cuda(hipSetDevice(device));
if (data_span.size() == 0) {
offsets[shard + 1] = total_size;
} else {
// copy the last element from every shard
dh::safe_cuda(hipMemcpy(&offsets.at(shard + 1),
&data_span[data_span.size()-1],
sizeof(size_t), hipMemcpyDeviceToHost));
}
}
}
// This function populates the explicit offsets that can be used to create a window into the
// underlying host vector. The window starts from the `batch_offset` and has a size of
// `batch_size`, and is sharded across all the devices. Each shard is granular depending on
// the number of output classes `n_classes`.
void PredictionDeviceOffsets(size_t total_size, size_t batch_offset, size_t batch_size,
int n_classes, std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
size_t n_shards = devices_.Size();
offsets.resize(n_shards + 2);
size_t rows_per_shard = dh::DivRoundUp(batch_size, n_shards);
for (size_t shard = 0; shard < devices_.Size(); ++shard) {
size_t n_rows = std::min(batch_size, shard * rows_per_shard);
offsets[shard] = batch_offset + n_rows * n_classes;
}
offsets[n_shards] = batch_offset + batch_size * n_classes;
offsets[n_shards + 1] = total_size;
}
struct DeviceShard {
DeviceShard() : device_{-1} {}
void Init(int device) {
this->device_ = device;
max_shared_memory_bytes_ = dh::MaxSharedMemory(this->device_);
}
void InitModel(const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<DevicePredictionNode>& h_nodes,
size_t tree_begin, size_t tree_end) {
dh::safe_cuda(hipSetDevice(device_));
nodes_.resize(h_nodes.size());
dh::safe_cuda(hipMemcpyAsync(dh::Raw(nodes_), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
hipMemcpyHostToDevice));
tree_segments_.resize(h_tree_segments.size());
dh::safe_cuda(hipMemcpyAsync(dh::Raw(tree_segments_), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
hipMemcpyHostToDevice));
tree_group_.resize(model.tree_info.size());
dh::safe_cuda(hipMemcpyAsync(dh::Raw(tree_group_), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
hipMemcpyHostToDevice));
this->tree_begin_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group_ = model.param.num_output_group;
}
void PredictInternal
(const SparsePage& batch, const MetaInfo& info,
HostDeviceVector<bst_float>* predictions) {
if (predictions->DeviceSize(device_) == 0) { return; }
dh::safe_cuda(hipSetDevice(device_));
const int BLOCK_THREADS = 128;
size_t num_rows = batch.offset.DeviceSize(device_) - 1;
const int GRID_SIZE = static_cast<int>(dh::DivRoundUp(num_rows, BLOCK_THREADS));
int shared_memory_bytes = static_cast<int>
(sizeof(float) * info.num_col_ * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
const auto& data_distr = batch.data.Distribution();
size_t entry_start = data_distr.ShardStart(batch.data.Size(),
data_distr.Devices().Index(device_));
hipLaunchKernelGGL(( PredictKernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), shared_memory_bytes, 0,
dh::ToSpan(nodes_), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments_),
dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
batch.data.DeviceSpan(device_), this->tree_begin_, this->tree_end_, info.num_col_,
num_rows, entry_start, use_shared, this->num_group_);
}
private:
int device_;
thrust::device_vector<DevicePredictionNode> nodes_;
thrust::device_vector<size_t> tree_segments_;
thrust::device_vector<int> tree_group_;
size_t max_shared_memory_bytes_;
size_t tree_begin_;
size_t tree_end_;
int num_group_;
};
void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard &shard) {
shard.InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
});
}
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) { return; }
monitor_.StartCuda("DevicePredictInternal");
InitModel(model, tree_begin, tree_end);
size_t batch_offset = 0;
for (auto &batch : dmat->GetRowBatches()) {
bool is_external_memory = batch.Size() < dmat->Info().num_row_;
if (is_external_memory) {
std::vector<size_t> out_preds_offsets;
PredictionDeviceOffsets(out_preds->Size(), batch_offset, batch.Size(),
model.param.num_output_group, &out_preds_offsets);
out_preds->Reshard(GPUDistribution::Explicit(devices_, out_preds_offsets));
}
batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
std::vector<size_t> device_offsets;
DeviceOffsets(batch.offset, batch.data.Size(), &device_offsets);
batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.PredictInternal(batch, dmat->Info(), out_preds);
});
batch_offset += batch.Size() * model.param.num_output_group;
}
out_preds->Reshard(GPUDistribution::Granular(devices_, model.param.num_output_group));
monitor_.StopCuda("DevicePredictInternal");
}
public:
GPUPredictor() // NOLINT
: cpu_predictor_(Predictor::Create("cpu_predictor")) {} // NOLINT
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
GPUSet devices = GPUSet::All(
param_.gpu_id, param_.n_gpus, dmat->Info().num_row_);
ConfigureShards(devices);
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.param.num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
monitor_.StartCuda("PredictFromCache");
out_preds->Shard(y.Distribution());
out_preds->Resize(y.Size());
out_preds->Copy(y);
monitor_.StopCuda("PredictFromCache");
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor_->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor_->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor_->Init(cfg, cache);
param_.InitAllowUnknown(cfg);
GPUSet devices = GPUSet::All(param_.gpu_id, param_.n_gpus);
ConfigureShards(devices);
}
private:
/*! \brief Reconfigure shards when the GPUSet is changed. */
void ConfigureShards(GPUSet devices) {
if (devices_ == devices) return;
devices_ = devices;
shards_.clear();
shards_.resize(devices_.Size());
dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
shard.Init(devices_.DeviceId(i));
});
}
GPUPredictionParam param_;
std::unique_ptr<Predictor> cpu_predictor_;
std::vector<DeviceShard> shards_;
GPUSet devices_;
common::Monitor monitor_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
|
7ac288a1d4a2ab3511a754cb146758ce98b7ac60.cu
|
/*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
/*! \brief prediction parameters */
struct GPUPredictionParam : public dmlc::Parameter<GPUPredictionParam> {
int gpu_id;
int n_gpus;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUPredictionParam) {
DMLC_DECLARE_FIELD(gpu_id).set_lower_bound(0).set_default(0).describe(
"Device ordinal for GPU prediction.");
DMLC_DECLARE_FIELD(n_gpus).set_lower_bound(-1).set_default(1).describe(
"Number of devices to use for prediction.");
}
};
DMLC_REGISTER_PARAMETER(GPUPredictionParam);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
struct ElementLoader {
bool use_shared;
common::Span<const size_t> d_row_ptr;
common::Span<const Entry> d_data;
int num_features;
float* smem;
size_t entry_start;
__device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
common::Span<const Entry> entry, int num_features,
float* smem, int num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem),
entry_start(entry_start) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
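// Walks one tree from the root for row ridx: a NaN feature value follows the node's
// default (missing) direction, otherwise the split condition selects the left or
// right child; returns the weight of the reached leaf.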
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
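// One thread per input row: each thread accumulates the leaf weights of the requested
// tree range onto the output predictions, either into a single slot (num_group == 1)
// or into the per-group slot of the strided output vector.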
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
common::Span<const size_t> d_row_ptr,
common::Span<const Entry> d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
void DeviceOffsets(const HostDeviceVector<size_t>& data,
size_t total_size,
std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
offsets.resize(devices_.Size() + 1);
offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
for (int shard = 0; shard < devices_.Size(); ++shard) {
int device = devices_.DeviceId(shard);
auto data_span = data.DeviceSpan(device);
dh::safe_cuda(cudaSetDevice(device));
if (data_span.size() == 0) {
offsets[shard + 1] = total_size;
} else {
// copy the last element from every shard
dh::safe_cuda(cudaMemcpy(&offsets.at(shard + 1),
&data_span[data_span.size()-1],
sizeof(size_t), cudaMemcpyDeviceToHost));
}
}
}
// This function populates the explicit offsets that can be used to create a window into the
// underlying host vector. The window starts from the `batch_offset` and has a size of
// `batch_size`, and is sharded across all the devices. Each shard is granular depending on
// the number of output classes `n_classes`.
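  // Worked example (hypothetical numbers): with batch_offset = 0, batch_size = 10,
  // n_classes = 2 and two devices, rows_per_shard = 5, so the offsets become
  // {0, 10, 20, total_size}: shard 0 owns predictions [0, 10), shard 1 owns
  // [10, 20), and the final entry closes the window at total_size.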
void PredictionDeviceOffsets(size_t total_size, size_t batch_offset, size_t batch_size,
int n_classes, std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
size_t n_shards = devices_.Size();
offsets.resize(n_shards + 2);
size_t rows_per_shard = dh::DivRoundUp(batch_size, n_shards);
for (size_t shard = 0; shard < devices_.Size(); ++shard) {
size_t n_rows = std::min(batch_size, shard * rows_per_shard);
offsets[shard] = batch_offset + n_rows * n_classes;
}
offsets[n_shards] = batch_offset + batch_size * n_classes;
offsets[n_shards + 1] = total_size;
}
struct DeviceShard {
DeviceShard() : device_{-1} {}
void Init(int device) {
this->device_ = device;
max_shared_memory_bytes_ = dh::MaxSharedMemory(this->device_);
}
void InitModel(const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<DevicePredictionNode>& h_nodes,
size_t tree_begin, size_t tree_end) {
dh::safe_cuda(cudaSetDevice(device_));
nodes_.resize(h_nodes.size());
dh::safe_cuda(cudaMemcpyAsync(dh::Raw(nodes_), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
cudaMemcpyHostToDevice));
tree_segments_.resize(h_tree_segments.size());
dh::safe_cuda(cudaMemcpyAsync(dh::Raw(tree_segments_), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
cudaMemcpyHostToDevice));
tree_group_.resize(model.tree_info.size());
dh::safe_cuda(cudaMemcpyAsync(dh::Raw(tree_group_), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
cudaMemcpyHostToDevice));
this->tree_begin_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group_ = model.param.num_output_group;
}
void PredictInternal
(const SparsePage& batch, const MetaInfo& info,
HostDeviceVector<bst_float>* predictions) {
if (predictions->DeviceSize(device_) == 0) { return; }
dh::safe_cuda(cudaSetDevice(device_));
const int BLOCK_THREADS = 128;
size_t num_rows = batch.offset.DeviceSize(device_) - 1;
const int GRID_SIZE = static_cast<int>(dh::DivRoundUp(num_rows, BLOCK_THREADS));
int shared_memory_bytes = static_cast<int>
(sizeof(float) * info.num_col_ * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
const auto& data_distr = batch.data.Distribution();
size_t entry_start = data_distr.ShardStart(batch.data.Size(),
data_distr.Devices().Index(device_));
PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>
(dh::ToSpan(nodes_), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments_),
dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
batch.data.DeviceSpan(device_), this->tree_begin_, this->tree_end_, info.num_col_,
num_rows, entry_start, use_shared, this->num_group_);
}
private:
int device_;
thrust::device_vector<DevicePredictionNode> nodes_;
thrust::device_vector<size_t> tree_segments_;
thrust::device_vector<int> tree_group_;
size_t max_shared_memory_bytes_;
size_t tree_begin_;
size_t tree_end_;
int num_group_;
};
void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard &shard) {
shard.InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
});
}
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) { return; }
monitor_.StartCuda("DevicePredictInternal");
InitModel(model, tree_begin, tree_end);
size_t batch_offset = 0;
for (auto &batch : dmat->GetRowBatches()) {
bool is_external_memory = batch.Size() < dmat->Info().num_row_;
if (is_external_memory) {
std::vector<size_t> out_preds_offsets;
PredictionDeviceOffsets(out_preds->Size(), batch_offset, batch.Size(),
model.param.num_output_group, &out_preds_offsets);
out_preds->Reshard(GPUDistribution::Explicit(devices_, out_preds_offsets));
}
batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
std::vector<size_t> device_offsets;
DeviceOffsets(batch.offset, batch.data.Size(), &device_offsets);
batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.PredictInternal(batch, dmat->Info(), out_preds);
});
batch_offset += batch.Size() * model.param.num_output_group;
}
out_preds->Reshard(GPUDistribution::Granular(devices_, model.param.num_output_group));
monitor_.StopCuda("DevicePredictInternal");
}
public:
GPUPredictor() // NOLINT
: cpu_predictor_(Predictor::Create("cpu_predictor")) {} // NOLINT
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
GPUSet devices = GPUSet::All(
param_.gpu_id, param_.n_gpus, dmat->Info().num_row_);
ConfigureShards(devices);
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.param.num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
monitor_.StartCuda("PredictFromCache");
out_preds->Shard(y.Distribution());
out_preds->Resize(y.Size());
out_preds->Copy(y);
monitor_.StopCuda("PredictFromCache");
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor_->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor_->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor_->Init(cfg, cache);
param_.InitAllowUnknown(cfg);
GPUSet devices = GPUSet::All(param_.gpu_id, param_.n_gpus);
ConfigureShards(devices);
}
private:
/*! \brief Re configure shards when GPUSet is changed. */
void ConfigureShards(GPUSet devices) {
if (devices_ == devices) return;
devices_ = devices;
shards_.clear();
shards_.resize(devices_.Size());
dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
shard.Init(devices_.DeviceId(i));
});
}
GPUPredictionParam param_;
std::unique_ptr<Predictor> cpu_predictor_;
std::vector<DeviceShard> shards_;
GPUSet devices_;
common::Monitor monitor_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
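// The registration above exposes this implementation as "gpu_predictor", the value
// typically passed to XGBoost's 'predictor' parameter to select GPU prediction.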
} // namespace predictor
} // namespace xgboost
|
8db0fe9c43feb3c96567a9365b1701d596ff9c6a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdint.h>
#include <math.h>
#define MAX_PATH_LEN (32 * 1024)
#define MAX_KERNEL_RADIUS 16
// thread block size
#define TX 32
#define TY 32
struct kernel_params {
float kernel[MAX_KERNEL_RADIUS + 1];
int w;
int h;
};
static void error(const char * message) {
fprintf(stderr, "ERROR: %s\n", message);
exit(-1);
}
static void usage(const char * message, const char * app) {
fprintf(stderr, "Usage: %s width height sigma file1 ... fileN\n", app);
fprintf(stderr, "Example: %s 1920 1080 3 f1.gray f2.gray f3.gray\n", app);
error(message);
}
static double timer_ms() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000.0 + tv.tv_usec * 0.001;
}
static __device__ int saturate(int n, int max_value) {
return max(0, min(n, max_value - 1));
}
static __device__ int get_pix(const uint8_t * src, int w, int h, int x, int y) {
return (float)src[saturate(x, w) + saturate(y, h) * w];
}
static __global__ void convolution_vertical(kernel_params p, uint8_t * src, uint8_t * dest) {
// coordinates of pixel processed by this thread
const int x = threadIdx.x + blockIdx.x * TX;
const int y = threadIdx.y + blockIdx.y * TY;
// shared cache for processed pixels
__shared__ float cache[TY + 2 * MAX_KERNEL_RADIUS][TX];
// all threads populate shared cache
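    // Two passes of TY rows fill the whole cache: the TY-row tile plus a
    // MAX_KERNEL_RADIUS halo above and below needs TY + 2*16 = 64 rows, which is
    // exactly 2*TY here (TY == 2*MAX_KERNEL_RADIUS).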
for(int ny = 0; ny < 2; ny++) {
cache[threadIdx.y + ny * TY][threadIdx.x]
= get_pix(src, p.w, p.h, x, y - MAX_KERNEL_RADIUS + ny * TY);
}
// wait for all threads of block to finish their contribution to cache
__syncthreads();
// stop this thread if out of bounds
if(x >= p.w || y >= p.h) {
return;
}
// get weighted sum of neighbors
float result = p.kernel[0] * cache[MAX_KERNEL_RADIUS + threadIdx.y][threadIdx.x];
for(int k = 1; k <= MAX_KERNEL_RADIUS; k++) {
result += p.kernel[k] * cache[MAX_KERNEL_RADIUS + threadIdx.y - k][threadIdx.x];
result += p.kernel[k] * cache[MAX_KERNEL_RADIUS + threadIdx.y + k][threadIdx.x];
}
// save result
dest[x + y * p.w] = saturate((int)result, 256);
}
static __global__ void convolution_horizontal(kernel_params p, uint8_t * src, uint8_t * dest) {
// coordinates of pixel processed by this thread
const int x = threadIdx.x + blockIdx.x * TX;
const int y = threadIdx.y + blockIdx.y * TY;
// shared cache for processed pixels
__shared__ float cache[TY][TX + 2 * MAX_KERNEL_RADIUS];
// all threads populate shared cache
for(int nx = 0; nx < 2; nx++) {
cache[threadIdx.y][threadIdx.x + nx * TX]
= get_pix(src, p.w, p.h, x - MAX_KERNEL_RADIUS + nx * TX, y);
}
// wait for all threads of block to finish their contribution to cache
__syncthreads();
// stop this thread if out of bounds
if(x >= p.w || y >= p.h) {
return;
}
// get weighted sum of neighbors
float result = p.kernel[0] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x];
for(int k = 1; k <= MAX_KERNEL_RADIUS; k++) {
result += p.kernel[k] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x + k];
result += p.kernel[k] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x - k];
}
// save result
dest[x + y * p.w] = saturate((int)result, 256);
}
static float gaussian(float sigma, float x) {
const float e = x / sigma;
return exp(-0.5 * e * e);
}
int main(int argn, char ** argv) {
kernel_params params;
    if(argn < 5) {
usage("Wrong argument count", *argv);
}
// read width and height
params.w = atoi(argv[1]);
params.h = atoi(argv[2]);
if(params.w < 1 || params.h < 1) {
usage("Both width and height must be positive integers", *argv);
}
const int pix_count = params.w * params.h;
// read sigma and prepare normalized kernel (sum = 1)
const float sigma = atof(argv[3]);
float kernel_sum = 0.0f;
for(int k = 0; k <= MAX_KERNEL_RADIUS; k++) {
kernel_sum += params.kernel[k] = gaussian(sigma, k);
}
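    // Only the non-negative half of the symmetric kernel is stored, so the full
    // window sums to 2*kernel_sum minus the centre weight (counted twice by the
    // doubling); dividing every tap by that total normalises the kernel to 1.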
kernel_sum = 2.0 * kernel_sum - params.kernel[0];
for(int k = 0; k <= MAX_KERNEL_RADIUS; k++) {
params.kernel[k] /= kernel_sum;
}
// dump the kernel
printf("Convolution kernel:");
for(int k = -MAX_KERNEL_RADIUS; k <= MAX_KERNEL_RADIUS; k++) {
printf(" %f", params.kernel[k < 0 ? -k : k]);
}
printf("\n");
// prepare buffers
uint8_t * const data_ptr = (uint8_t*)malloc(pix_count);
uint8_t * data_gpu_ptr;
uint8_t * temp_gpu_ptr;
hipMalloc((void**)&data_gpu_ptr, pix_count);
hipMalloc((void**)&temp_gpu_ptr, pix_count);
// measure time of processing of all images
const double begin = timer_ms();
for(int i = 4; i < argn; i++) {
// read input data
printf("Processing '%s'\n", argv[i]);
FILE * const src_file = fopen(argv[i], "rb");
if(NULL == src_file || 1 != fread(data_ptr, pix_count, 1, src_file)) {
error(argv[i]);
}
fclose(src_file);
// copy data to GPU memory
hipMemcpy(data_gpu_ptr, data_ptr, pix_count, hipMemcpyHostToDevice);
// launch vertical and horizontal pass
dim3 block(TX, TY);
dim3 grid((params.w + TX - 1) / TX, (params.h + TY - 1) / TY);
hipLaunchKernelGGL(( convolution_vertical), dim3(grid), dim3(block), 0, 0, params, data_gpu_ptr, temp_gpu_ptr);
hipLaunchKernelGGL(( convolution_horizontal), dim3(grid), dim3(block), 0, 0, params, temp_gpu_ptr, data_gpu_ptr);
// copy data back from GPU
hipMemcpy(data_ptr, data_gpu_ptr, pix_count, hipMemcpyDeviceToHost);
// compose output filename
char out_path[MAX_PATH_LEN + 1];
snprintf(out_path, MAX_PATH_LEN, "%s.out.gray", argv[i]);
// write data to output file
FILE * const out_file = fopen(out_path, "wb");
if(NULL == out_file || 1 != fwrite(data_ptr, pix_count, 1, out_file)) {
error(out_path);
}
fclose(out_file);
}
const double end = timer_ms();
// print total time
printf("time: %f ms, %d images => %f ms/image\n",
end - begin, argn - 4, (end - begin) / (argn - 4));
// cleanup
free(data_ptr);
hipFree(data_gpu_ptr);
hipFree(temp_gpu_ptr);
return 0;
}
|
8db0fe9c43feb3c96567a9365b1701d596ff9c6a.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdint.h>
#include <math.h>
#define MAX_PATH_LEN (32 * 1024)
#define MAX_KERNEL_RADIUS 16
// thread block size
#define TX 32
#define TY 32
struct kernel_params {
float kernel[MAX_KERNEL_RADIUS + 1];
int w;
int h;
};
static void error(const char * message) {
fprintf(stderr, "ERROR: %s\n", message);
exit(-1);
}
static void usage(const char * message, const char * app) {
fprintf(stderr, "Usage: %s width height sigma file1 ... fileN\n", app);
fprintf(stderr, "Example: %s 1920 1080 3 f1.gray f2.gray f3.gray\n", app);
error(message);
}
static double timer_ms() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000.0 + tv.tv_usec * 0.001;
}
static __device__ int saturate(int n, int max_value) {
return max(0, min(n, max_value - 1));
}
static __device__ int get_pix(const uint8_t * src, int w, int h, int x, int y) {
return (float)src[saturate(x, w) + saturate(y, h) * w];
}
static __global__ void convolution_vertical(kernel_params p, uint8_t * src, uint8_t * dest) {
// coordinates of pixel processed by this thread
const int x = threadIdx.x + blockIdx.x * TX;
const int y = threadIdx.y + blockIdx.y * TY;
// shared cache for processed pixels
__shared__ float cache[TY + 2 * MAX_KERNEL_RADIUS][TX];
// all threads populate shared cache
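    // Two passes of TY rows fill the whole cache: the TY-row tile plus a
    // MAX_KERNEL_RADIUS halo above and below needs TY + 2*16 = 64 rows, which is
    // exactly 2*TY here (TY == 2*MAX_KERNEL_RADIUS).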
for(int ny = 0; ny < 2; ny++) {
cache[threadIdx.y + ny * TY][threadIdx.x]
= get_pix(src, p.w, p.h, x, y - MAX_KERNEL_RADIUS + ny * TY);
}
// wait for all threads of block to finish their contribution to cache
__syncthreads();
// stop this thread if out of bounds
if(x >= p.w || y >= p.h) {
return;
}
// get weighted sum of neighbors
float result = p.kernel[0] * cache[MAX_KERNEL_RADIUS + threadIdx.y][threadIdx.x];
for(int k = 1; k <= MAX_KERNEL_RADIUS; k++) {
result += p.kernel[k] * cache[MAX_KERNEL_RADIUS + threadIdx.y - k][threadIdx.x];
result += p.kernel[k] * cache[MAX_KERNEL_RADIUS + threadIdx.y + k][threadIdx.x];
}
// save result
dest[x + y * p.w] = saturate((int)result, 256);
}
static __global__ void convolution_horizontal(kernel_params p, uint8_t * src, uint8_t * dest) {
// coordinates of pixel processed by this thread
const int x = threadIdx.x + blockIdx.x * TX;
const int y = threadIdx.y + blockIdx.y * TY;
// shared cache for processed pixels
__shared__ float cache[TY][TX + 2 * MAX_KERNEL_RADIUS];
// all threads populate shared cache
for(int nx = 0; nx < 2; nx++) {
cache[threadIdx.y][threadIdx.x + nx * TX]
= get_pix(src, p.w, p.h, x - MAX_KERNEL_RADIUS + nx * TX, y);
}
// wait for all threads of block to finish their contribution to cache
__syncthreads();
// stop this thread if out of bounds
if(x >= p.w || y >= p.h) {
return;
}
// get weighted sum of neighbors
float result = p.kernel[0] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x];
for(int k = 1; k <= MAX_KERNEL_RADIUS; k++) {
result += p.kernel[k] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x + k];
result += p.kernel[k] * cache[threadIdx.y][MAX_KERNEL_RADIUS + threadIdx.x - k];
}
// save result
dest[x + y * p.w] = saturate((int)result, 256);
}
static float gaussian(float sigma, float x) {
const float e = x / sigma;
return exp(-0.5 * e * e);
}
int main(int argn, char ** argv) {
kernel_params params;
    if(argn < 5) {
usage("Wrong argument count", *argv);
}
// read width and height
params.w = atoi(argv[1]);
params.h = atoi(argv[2]);
if(params.w < 1 || params.h < 1) {
usage("Both width and height must be positive integers", *argv);
}
const int pix_count = params.w * params.h;
// read sigma and prepare normalized kernel (sum = 1)
const float sigma = atof(argv[3]);
float kernel_sum = 0.0f;
for(int k = 0; k <= MAX_KERNEL_RADIUS; k++) {
kernel_sum += params.kernel[k] = gaussian(sigma, k);
}
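    // Only the non-negative half of the symmetric kernel is stored, so the full
    // window sums to 2*kernel_sum minus the centre weight (counted twice by the
    // doubling); dividing every tap by that total normalises the kernel to 1.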
kernel_sum = 2.0 * kernel_sum - params.kernel[0];
for(int k = 0; k <= MAX_KERNEL_RADIUS; k++) {
params.kernel[k] /= kernel_sum;
}
// dump the kernel
printf("Convolution kernel:");
for(int k = -MAX_KERNEL_RADIUS; k <= MAX_KERNEL_RADIUS; k++) {
printf(" %f", params.kernel[k < 0 ? -k : k]);
}
printf("\n");
// prepare buffers
uint8_t * const data_ptr = (uint8_t*)malloc(pix_count);
uint8_t * data_gpu_ptr;
uint8_t * temp_gpu_ptr;
cudaMalloc((void**)&data_gpu_ptr, pix_count);
cudaMalloc((void**)&temp_gpu_ptr, pix_count);
// measure time of processing of all images
const double begin = timer_ms();
for(int i = 4; i < argn; i++) {
// read input data
printf("Processing '%s'\n", argv[i]);
FILE * const src_file = fopen(argv[i], "rb");
if(NULL == src_file || 1 != fread(data_ptr, pix_count, 1, src_file)) {
error(argv[i]);
}
fclose(src_file);
// copy data to GPU memory
cudaMemcpy(data_gpu_ptr, data_ptr, pix_count, cudaMemcpyHostToDevice);
// launch vertical and horizontal pass
dim3 block(TX, TY);
dim3 grid((params.w + TX - 1) / TX, (params.h + TY - 1) / TY);
convolution_vertical<<<grid, block>>>(params, data_gpu_ptr, temp_gpu_ptr);
convolution_horizontal<<<grid, block>>>(params, temp_gpu_ptr, data_gpu_ptr);
// copy data back from GPU
cudaMemcpy(data_ptr, data_gpu_ptr, pix_count, cudaMemcpyDeviceToHost);
// compose output filename
char out_path[MAX_PATH_LEN + 1];
snprintf(out_path, MAX_PATH_LEN, "%s.out.gray", argv[i]);
// write data to output file
FILE * const out_file = fopen(out_path, "wb");
if(NULL == out_file || 1 != fwrite(data_ptr, pix_count, 1, out_file)) {
error(out_path);
}
fclose(out_file);
}
const double end = timer_ms();
// print total time
printf("time: %f ms, %d images => %f ms/image\n",
end - begin, argn - 4, (end - begin) / (argn - 4));
// cleanup
free(data_ptr);
cudaFree(data_gpu_ptr);
cudaFree(temp_gpu_ptr);
return 0;
}
|
f73aae2502d7937693e4f5fce3e6f336c649101f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <cstdlib>
#include <sstream>
#include <iostream>
#include <fstream>
#include <string.h>
#include <stdlib.h>
#include <bits/stdc++.h>
#include <vector>
#include "input_image.cuh"
//#include "complex.cuh"
#include "math.h"
using namespace std;
#define pi 3.14159265358979323846
__global__ void makeCol(Complex *d_col,Complex *d_img, int which, int N){
int k=threadIdx.x+blockIdx.x*blockDim.x;
// d_col[k]->real=d_img[k*N+which]->real;
// d_col[k]->imag=d_img[k*N+which]->imag;
d_col[k]=d_img[k*N+which];
}
__global__ void makeRow(Complex *d_row,Complex *d_img, int which, int N){
int k=threadIdx.x+blockIdx.x*blockDim.x;
d_row[k]=d_img[which*N+k];
}
__global__ void fft(Complex *d_img, int N){ //fft for each row
int k=threadIdx.x+blockIdx.x*blockDim.x; //which row
// Complex** d_vec=new Complex*[N];
//Get your column/row
// for (int i=0;i<N;i++){
// if (dim==1){ //columns
// d_vec[i]=d_img[i*N+k];
// }else { //rows
// d_vec[i]=d_img[k*N+i];
// }
// }
//split even and odd halves
// Complex temp[N/2];
// for(int i=0; i<N/2; i++) //copy all odd elements to upper half
// temp[i] = column[i*2+1];
// for(int i=0; i<N/2; i++) // copy all even elements to lower half
// column[i] = column[i*2];
// for(int i=0; i<N/2; i++) // copy all odd back to upper-half
// column[i+N/2] = temp[i];
int half, first, last;
int current=N;
int num=1;
while(current>1) { //starting from N down to 2
half = current/2;
for (int i = 0; i < num; i++) {
Complex W(cos(2 * pi / current * i), -sin(2 * pi / current * i ));
//Jfirst=i*current;
//Jlast=i*current+half-1;
for(int J=i*current;J<i*current+half-1;J++) {
Complex odd = d_img[k * N + J + half + i];
Complex even = d_img[k * N + J + i];
d_img[k * N + J + half + i] = even - W * odd;
d_img[k * N + J + i] = even + W * odd;
}
}
num*=2;
        current=half;
}
}
//__global__ void fft1d(Complex *d_vec, int N){
// int k=threadIdx.x+blockIdx.x*blockDim.x;
// if (N < 2) { //base case - done
// } else {
//
// //Complex W = exp(Complex(0, -2. * pi * k / N));
//
// //split even and odd halves
//// Complex temp[N/2];
//// for(int i=0; i<N/2; i++) //copy all odd elements to upper half
//// temp[i] = column[i*2+1];
//// for(int i=0; i<N/2; i++) // copy all even elements to lower half
//// column[i] = column[i*2];
//// for(int i=0; i<N/2; i++) // copy all odd back to upper-half
//// column[i+N/2] = temp[i];
//
// //Recurse for each half of column/row
// fft1d<<<1, N/2>>>(d_vec,N/2);
// fft1d<<<1, N/2>>>(d_vec+N/2,N/2);
//
// for (int i = 0; i < N / 2; i++) {
// Complex W(cos(2*pi/N*i*k), -sin(2*pi/N*i*k));
// Complex even = d_vec[i * 2];
// Complex odd = d_vec[i * 2 + 1];
//
// d_vec[i*2] = even + W * odd;
// d_vec[i*2+1] = even - W * odd;
//
// }
//
// }
//
//
//}
__global__ void putImage(Complex *d_vec, Complex *d_img, int which,int N,int mode){
int k=threadIdx.x+blockIdx.x*blockDim.x;
if (mode==1) {
d_img[k * N + which] = d_vec[k];
} else{
d_img[which*N+k] = d_vec[k];
}
}
__global__ void ifft(Complex *imgvec, int N){
}
int main(int argc, char *argv[]) {
//read inputs
string direction = argv[1];
InputImage input = InputImage(argv[2]);
int N=input.get_width(); //N = width = height
string outputStr = argv[3];
const char *outputfile = outputStr.c_str();
Complex *d_vec, *d_row, *d_img;
int blockNum=ceil((float)N/1024);
int colsize=N*sizeof(Complex);
    int imgsize=N*colsize; // N*N complex values
int threadNum;
if (N==2048){
threadNum=1024;
} else{
threadNum=N;
}
hipMalloc((void **)&d_vec,colsize);
hipMalloc((void **)&d_img,imgsize);
hipMemcpy(d_img,input.get_image_data(),imgsize,hipMemcpyHostToDevice);
//do the fft
if (direction=="forward"){
//columns first
for (int i=0;i<N;i++) {
//makeCol<<<blockNum,threadNum>>>(d_vec,d_img, i, N);
//hipDeviceSynchronize();
hipLaunchKernelGGL(( fft), dim3(blockNum),dim3(threadNum), 0, 0, d_img, N);
//putImage<<<blockNum,threadNum>>>(d_vec,d_img,i,N,1);
//fft<<<blockNum,threadNum>>>(d_img,N,2);
hipDeviceSynchronize();
}
//then rows
// for (int i=0;i<N;i++) {
// makeRow<<<blockNum,threadNum>>>(d_vec,d_img, i, N);
// hipDeviceSynchronize();
// fft1d<<<blockNum,threadNum>>>(d_vec, N);
// putImage<<<blockNum,threadNum>>>(d_vec,d_img,i,N,2);
// //fft<<<blockNum,threadNum>>>(d_img,N,2);
// hipDeviceSynchronize();
//
// }
//save image to file
hipMemcpy(input.get_image_data(),d_img,imgsize,hipMemcpyDeviceToHost);
input.save_image_data(outputfile,input.get_image_data(),N,N);
cout<<"FFTs saved."<<endl;
}
if (direction=="reverse"){
//ifft(input.data,width);
}
return 0;
}
|
f73aae2502d7937693e4f5fce3e6f336c649101f.cu
|
#include <stdio.h>
#include <cstdlib>
#include <sstream>
#include <iostream>
#include <fstream>
#include <string.h>
#include <stdlib.h>
#include <bits/stdc++.h>
#include <vector>
#include "input_image.cuh"
//#include "complex.cuh"
#include "math.h"
using namespace std;
#define pi 3.14159265358979323846
__global__ void makeCol(Complex *d_col,Complex *d_img, int which, int N){
int k=threadIdx.x+blockIdx.x*blockDim.x;
// d_col[k]->real=d_img[k*N+which]->real;
// d_col[k]->imag=d_img[k*N+which]->imag;
d_col[k]=d_img[k*N+which];
}
__global__ void makeRow(Complex *d_row,Complex *d_img, int which, int N){
int k=threadIdx.x+blockIdx.x*blockDim.x;
d_row[k]=d_img[which*N+k];
}
__global__ void fft(Complex *d_img, int N){ //fft for each row
int k=threadIdx.x+blockIdx.x*blockDim.x; //which row
// Complex** d_vec=new Complex*[N];
//Get your column/row
// for (int i=0;i<N;i++){
// if (dim==1){ //columns
// d_vec[i]=d_img[i*N+k];
// }else { //rows
// d_vec[i]=d_img[k*N+i];
// }
// }
//split even and odd halves
// Complex temp[N/2];
// for(int i=0; i<N/2; i++) //copy all odd elements to upper half
// temp[i] = column[i*2+1];
// for(int i=0; i<N/2; i++) // copy all even elements to lower half
// column[i] = column[i*2];
// for(int i=0; i<N/2; i++) // copy all odd back to upper-half
// column[i+N/2] = temp[i];
int half, first, last;
int current=N;
int num=1;
while(current>1) { //starting from N down to 2
half = current/2;
for (int i = 0; i < num; i++) {
Complex W(cos(2 * pi / current * i), -sin(2 * pi / current * i ));
//Jfirst=i*current;
//Jlast=i*current+half-1;
for(int J=i*current;J<i*current+half-1;J++) {
Complex odd = d_img[k * N + J + half + i];
Complex even = d_img[k * N + J + i];
d_img[k * N + J + half + i] = even - W * odd;
d_img[k * N + J + i] = even + W * odd;
}
}
num*=2;
        current=half;
}
}
//__global__ void fft1d(Complex *d_vec, int N){
// int k=threadIdx.x+blockIdx.x*blockDim.x;
// if (N < 2) { //base case - done
// } else {
//
// //Complex W = exp(Complex(0, -2. * pi * k / N));
//
// //split even and odd halves
//// Complex temp[N/2];
//// for(int i=0; i<N/2; i++) //copy all odd elements to upper half
//// temp[i] = column[i*2+1];
//// for(int i=0; i<N/2; i++) // copy all even elements to lower half
//// column[i] = column[i*2];
//// for(int i=0; i<N/2; i++) // copy all odd back to upper-half
//// column[i+N/2] = temp[i];
//
// //Recurse for each half of column/row
// fft1d<<<1, N/2>>>(d_vec,N/2);
// fft1d<<<1, N/2>>>(d_vec+N/2,N/2);
//
// for (int i = 0; i < N / 2; i++) {
// Complex W(cos(2*pi/N*i*k), -sin(2*pi/N*i*k));
// Complex even = d_vec[i * 2];
// Complex odd = d_vec[i * 2 + 1];
//
// d_vec[i*2] = even + W * odd;
// d_vec[i*2+1] = even - W * odd;
//
// }
//
// }
//
//
//}
__global__ void putImage(Complex *d_vec, Complex *d_img, int which,int N,int mode){
int k=threadIdx.x+blockIdx.x*blockDim.x;
if (mode==1) {
d_img[k * N + which] = d_vec[k];
} else{
d_img[which*N+k] = d_vec[k];
}
}
__global__ void ifft(Complex *imgvec, int N){
}
int main(int argc, char *argv[]) {
//read inputs
string direction = argv[1];
InputImage input = InputImage(argv[2]);
int N=input.get_width(); //N = width = height
string outputStr = argv[3];
const char *outputfile = outputStr.c_str();
Complex *d_vec, *d_row, *d_img;
int blockNum=ceil((float)N/1024);
int colsize=N*sizeof(Complex);
    int imgsize=N*colsize; // N*N complex values
int threadNum;
if (N==2048){
threadNum=1024;
} else{
threadNum=N;
}
cudaMalloc((void **)&d_vec,colsize);
cudaMalloc((void **)&d_img,imgsize);
cudaMemcpy(d_img,input.get_image_data(),imgsize,cudaMemcpyHostToDevice);
//do the fft
if (direction=="forward"){
//columns first
for (int i=0;i<N;i++) {
//makeCol<<<blockNum,threadNum>>>(d_vec,d_img, i, N);
//cudaDeviceSynchronize();
fft<<<blockNum,threadNum>>>(d_img, N);
//putImage<<<blockNum,threadNum>>>(d_vec,d_img,i,N,1);
//fft<<<blockNum,threadNum>>>(d_img,N,2);
cudaDeviceSynchronize();
}
//then rows
// for (int i=0;i<N;i++) {
// makeRow<<<blockNum,threadNum>>>(d_vec,d_img, i, N);
// cudaDeviceSynchronize();
// fft1d<<<blockNum,threadNum>>>(d_vec, N);
// putImage<<<blockNum,threadNum>>>(d_vec,d_img,i,N,2);
// //fft<<<blockNum,threadNum>>>(d_img,N,2);
// cudaDeviceSynchronize();
//
// }
//save image to file
cudaMemcpy(input.get_image_data(),d_img,imgsize,cudaMemcpyDeviceToHost);
input.save_image_data(outputfile,input.get_image_data(),N,N);
cout<<"FFTs saved."<<endl;
}
if (direction=="reverse"){
//ifft(input.data,width);
}
return 0;
}
|
7be6895ea2fa1ce7a553797a1252cf10151d7f20.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 12
// task 1: memory allocation and transfer from device to host
__global__ void
kern(int *A)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
A[idx] = idx;
}
/**
* Host main routine
*/
int main(void)
{
    // allocate memory and copy its values
    int *A = (int *) malloc(N*sizeof(int)); // allocate memory on the host
    int *d_A;
    hipMalloc(&d_A,N*sizeof(int)); // allocate memory on the device
hipMemcpy(d_A,A,N*sizeof(int),hipMemcpyHostToDevice);
dim3 grid,block;
block.x = 4;
grid.x = 12/block.x;
hipLaunchKernelGGL(( kern), dim3(grid),dim3(block), 0, 0, d_A);
hipMemcpy(A,d_A,N*sizeof(int),hipMemcpyDeviceToHost);
    // print the result
for(int i = 0;i < N;i++)
printf("A[%d] = %d\n",i,A[i]);
free(A);
hipFree(d_A);
return 0;
}
|
7be6895ea2fa1ce7a553797a1252cf10151d7f20.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#define N 12
// task 1: memory allocation and transfer from device to host
__global__ void
kern(int *A)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
A[idx] = idx;
}
/**
* Host main routine
*/
int main(void)
{
    // allocate memory and copy its values
    int *A = (int *) malloc(N*sizeof(int)); // allocate memory on the host
    int *d_A;
    cudaMalloc(&d_A,N*sizeof(int)); // allocate memory on the device
cudaMemcpy(d_A,A,N*sizeof(int),cudaMemcpyHostToDevice);
dim3 grid,block;
block.x = 4;
grid.x = 12/block.x;
kern<<<grid,block>>>(d_A);
cudaMemcpy(A,d_A,N*sizeof(int),cudaMemcpyDeviceToHost);
    // print the result
for(int i = 0;i < N;i++)
printf("A[%d] = %d\n",i,A[i]);
free(A);
cudaFree(d_A);
return 0;
}
|
612bc0e9eaac8790e8197982c53a33fcc98e664e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "ex/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
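    // Hypothetical example: with pooled_height = 7 and a 21-row ROI, bin_size_h = 3,
    // so pooled cell ph = 2 covers ROI rows [6, 9) before the offset and clipping below.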
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
612bc0e9eaac8790e8197982c53a33fcc98e664e.cu
|
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "ex/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
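    // Hypothetical example: with pooled_height = 7 and a 21-row ROI, bin_size_h = 3,
    // so pooled cell ph = 2 covers ROI rows [6, 9) before the offset and clipping below.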
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
00f53cb6a2ee4a54c07f06ee8d155382cd1c2115.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CSE 5441 : Lab 4 p2
* Filename : biswas_rajarshi_part2.cu
* Author : Rajarshi Biswas ([email protected])
* The Ohio State University.
*/
#include <math.h>
#include <iostream>
#include "lab4_lib/read_bmp.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define THREADS_X 32
#define THREADS_Y 32
/*
* Kernel function. Runs on GPU.
* bmp_data - Actual image information.
* new_bmp_data - Modified image information.
* black_cell_count - Counts the number of black cells.
* threshold - Current threshold.
* wd - Width of the image.
* ht - Height of the image.
*/
__global__ void compute_on_device(uint8_t *bmp_data, uint8_t *new_bmp_img, uint32_t *black_cell_count,
uint32_t threshold, uint32_t wd, uint32_t ht)
{
uint32_t i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint32_t j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint32_t index = i * wd + j;
if (index > wd*ht) {
return;
// Check the boundary.
}
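    // Gx and Gy below apply the standard 3x3 Sobel masks to the 8-neighbourhood
    // (weights listed row by row, top to bottom):
    //   Gx: [-1 0 +1; -2 0 +2; -1 0 +1]    Gy: [+1 +2 +1; 0 0 0; -1 -2 -1]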
if ( (i >= 1 && i < (ht-1)) && (j >= 1 && j < (wd - 1))) {
float Gx = bmp_data[ (i-1)*wd + (j+1) ] - bmp_data[ (i-1)*wd + (j-1) ]
+ 2*bmp_data[ (i)*wd + (j+1) ] - 2*bmp_data[ (i)*wd + (j-1) ]
+ bmp_data[ (i+1)*wd + (j+1) ] - bmp_data[ (i+1)*wd + (j-1) ];
float Gy = bmp_data[ (i-1)*wd + (j-1) ] + 2*bmp_data[ (i-1)*wd + (j) ]
+ bmp_data[ (i-1)*wd + (j+1) ] - bmp_data[ (i+1)*wd + (j-1) ]
- 2*bmp_data[ (i+1)*wd + (j) ] - bmp_data[ (i+1)*wd + (j+1) ];
float mag = sqrt(Gx * Gx + Gy * Gy);
if (mag > threshold) {
new_bmp_img[ index ] = 255;
} else {
new_bmp_img[index] = 0;
// Increment the number of black count cell atomically.
atomicAdd(black_cell_count, 1);
}
} else {
return;
}
}
/*
* Wrapper function that calls the kernel function.
* in_file - The input image file.
* cuda_out_file- The output image file.
* Returns the threshold value at convergence.
*/
int cuda_processing(FILE *in_file, FILE *cuda_out_file) {
bmp_image img1;
uint8_t *host_bmp_data = (uint8_t *) img1.read_bmp_file(in_file);
//Get image attributes
uint32_t wd = img1.image_width;
uint32_t ht = img1.image_height;
uint32_t num_pixel = img1.num_pixel;
uint8_t* host_new_bmp_img = (uint8_t*) malloc(num_pixel);
// Initialize the device memory.
uint8_t *device_bmp_data;
uint8_t *device_new_bmp_img;
uint32_t *device_black_cell_count;
hipMalloc((void**) &device_bmp_data, num_pixel);
hipMalloc((void**) &device_new_bmp_img, num_pixel);
hipMalloc((void **) &device_black_cell_count, sizeof(uint32_t));
// Initialize the array to 0.
for (int i = 0 ;i < num_pixel; i++) {
host_new_bmp_img[i] = 0;
}
// copy it to cuda mem.
hipMemcpy(device_bmp_data, host_bmp_data, num_pixel, hipMemcpyHostToDevice);
hipMemcpy(device_new_bmp_img, host_new_bmp_img, num_pixel, hipMemcpyHostToDevice);
uint32_t threshold = 0;
uint32_t black_cell_count = 0;
dim3 threadsPerBlock(THREADS_X, THREADS_Y);
dim3 blocksPerGrid((ht/THREADS_X) + 1, (wd/THREADS_Y) + 1);
//Convergence loop
while (black_cell_count < (75 * wd * ht/100))
{
black_cell_count = 0;
hipMemcpy(device_black_cell_count, &black_cell_count, sizeof(uint32_t),hipMemcpyHostToDevice);
threshold += 1;
// Call cuda kernel.
hipLaunchKernelGGL(( compute_on_device) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, device_bmp_data,
device_new_bmp_img, device_black_cell_count, threshold, wd, ht);
hipMemcpy(host_new_bmp_img, device_new_bmp_img, num_pixel, hipMemcpyDeviceToHost);
hipMemcpy(&black_cell_count, device_black_cell_count, sizeof(uint32_t), hipMemcpyDeviceToHost);
}
img1.write_bmp_file(cuda_out_file, host_new_bmp_img);
// Free all the memory.
free(host_bmp_data);
free(host_new_bmp_img);
//free(host_black_cell_count);
hipFree(device_bmp_data);
hipFree(device_new_bmp_img);
hipFree(device_black_cell_count);
//hipFree(device_black_cell_count);
return threshold;
}
/*
* Serial function.
* in_file - The input image file.
* cuda_out_file- The output image file.
* Returns the threshold value at convergence.
*/
int serial_processing(FILE *in_file, FILE *serial_out_file) {
bmp_image img1;
uint8_t *bmp_data = (uint8_t *) img1.read_bmp_file(in_file);
//Allocate new output buffer of same size
uint8_t* new_bmp_img = (uint8_t*)malloc(img1.num_pixel);
// Initialize the array to 0.
for (int i = 0 ;i < img1.num_pixel; i++) {
new_bmp_img[i] = 0;
}
//Get image attributes
uint32_t wd = img1.image_width;
uint32_t ht = img1.image_height;
//Convergence loop
uint32_t threshold = 0;
uint32_t black_cell_count = 0;
// Serial version
while(black_cell_count < (75*wd*ht/100))
{
black_cell_count = 0;
threshold += 1;
for(int i=1; i < (ht-1); i++)
{
for(int j=1; j < (wd-1); j++)
{
float Gx = bmp_data[ (i-1)*wd + (j+1) ] - bmp_data[ (i-1)*wd + (j-1) ]
+ 2*bmp_data[ (i)*wd + (j+1) ] - 2*bmp_data[ (i)*wd + (j-1) ]
+ bmp_data[ (i+1)*wd + (j+1) ] - bmp_data[ (i+1)*wd + (j-1) ];
float Gy = bmp_data[ (i-1)*wd + (j-1) ] + 2*bmp_data[ (i-1)*wd + (j) ]
+ bmp_data[ (i-1)*wd + (j+1) ] - bmp_data[ (i+1)*wd + (j-1) ]
- 2*bmp_data[ (i+1)*wd + (j) ] - bmp_data[ (i+1)*wd + (j+1) ];
float mag = sqrt(Gx * Gx + Gy * Gy);
if(mag > threshold) {
new_bmp_img[ i * wd + j] = 255;
} else {
new_bmp_img[ i * wd + j] = 0;
black_cell_count++;
}
}
}
}
img1.write_bmp_file(serial_out_file, new_bmp_img);
free(bmp_data);
free(new_bmp_img);
return threshold;
}
int main(int argc, char* argv[]) {
struct timespec start, end;
FILE *in_file = fopen(argv[1], "rb");
FILE *serial_out_file = fopen(argv[2], "wb");
FILE *cuda_out_file = fopen(argv[3], "wb");
printf("\n**************************************************\n");
// Serial version
printf("\n** Serial version **\n\n");
clock_gettime(CLOCK_REALTIME, &start);
uint32_t serial_threshold = serial_processing(in_file, serial_out_file);
clock_gettime(CLOCK_REALTIME, &end);
double time_taken_serial = ((double)end.tv_sec + 1.0e-9*end.tv_nsec) -
((double)start.tv_sec + 1.0e-9*start.tv_nsec);
printf("\nTime taken for serial sobel operation: %.5f sec",time_taken_serial);
printf("\nThreshold during convergence: %d", serial_threshold );
printf("\n\n** CUDA version **\n\n");
// Cuda version
clock_gettime(CLOCK_REALTIME, &start);
uint32_t cuda_threshold = cuda_processing(in_file, cuda_out_file);
clock_gettime(CLOCK_REALTIME, &end);
double time_taken_cuda = ((double)end.tv_sec + 1.0e-9*end.tv_nsec) -
((double)start.tv_sec + 1.0e-9*start.tv_nsec);
// Print the result of cuda version.
printf("\nTime taken for CUDA sobel operation: %.5f sec",time_taken_cuda);
printf("\nThreshold during convergence: %d", cuda_threshold );
printf("\n**************************************************\n\n");
fclose(in_file);
return 0;
}
|
00f53cb6a2ee4a54c07f06ee8d155382cd1c2115.cu
|
/*
* CSE 5441 : Lab 4 p2
* Filename : biswas_rajarshi_part2.cu
* Author : Rajarshi Biswas ([email protected])
* The Ohio State University.
*/
#include <math.h>
#include <iostream>
#include "lab4_lib/read_bmp.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define THREADS_X 32
#define THREADS_Y 32
/*
* Kernel function. Runs on GPU.
* bmp_data - Actual image information.
* new_bmp_data - Modified image information.
* black_cell_count - Counts the number of black cells.
* threshold - Current threshold.
* wd - Width of the image.
* ht - Height of the image.
*/
__global__ void compute_on_device(uint8_t *bmp_data, uint8_t *new_bmp_img, uint32_t *black_cell_count,
uint32_t threshold, uint32_t wd, uint32_t ht)
{
uint32_t i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint32_t j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint32_t index = i * wd + j;
if (index > wd*ht) {
return;
// Check the boundary.
}
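    // Gx and Gy below apply the standard 3x3 Sobel masks to the 8-neighbourhood
    // (weights listed row by row, top to bottom):
    //   Gx: [-1 0 +1; -2 0 +2; -1 0 +1]    Gy: [+1 +2 +1; 0 0 0; -1 -2 -1]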
if ( (i >= 1 && i < (ht-1)) && (j >= 1 && j < (wd - 1))) {
float Gx = bmp_data[ (i-1)*wd + (j+1) ] - bmp_data[ (i-1)*wd + (j-1) ]
+ 2*bmp_data[ (i)*wd + (j+1) ] - 2*bmp_data[ (i)*wd + (j-1) ]
+ bmp_data[ (i+1)*wd + (j+1) ] - bmp_data[ (i+1)*wd + (j-1) ];
float Gy = bmp_data[ (i-1)*wd + (j-1) ] + 2*bmp_data[ (i-1)*wd + (j) ]
+ bmp_data[ (i-1)*wd + (j+1) ] - bmp_data[ (i+1)*wd + (j-1) ]
- 2*bmp_data[ (i+1)*wd + (j) ] - bmp_data[ (i+1)*wd + (j+1) ];
float mag = sqrt(Gx * Gx + Gy * Gy);
if (mag > threshold) {
new_bmp_img[ index ] = 255;
} else {
new_bmp_img[index] = 0;
// Increment the number of black count cell atomically.
atomicAdd(black_cell_count, 1);
}
} else {
return;
}
}
/*
* Wrapper function that calls the kernel function.
* in_file - The input image file.
* cuda_out_file- The output image file.
* Returns the threshold value at convergence.
*/
int cuda_processing(FILE *in_file, FILE *cuda_out_file) {
bmp_image img1;
uint8_t *host_bmp_data = (uint8_t *) img1.read_bmp_file(in_file);
//Get image attributes
uint32_t wd = img1.image_width;
uint32_t ht = img1.image_height;
uint32_t num_pixel = img1.num_pixel;
uint8_t* host_new_bmp_img = (uint8_t*) malloc(num_pixel);
// Initialize the device memory.
uint8_t *device_bmp_data;
uint8_t *device_new_bmp_img;
uint32_t *device_black_cell_count;
cudaMalloc((void**) &device_bmp_data, num_pixel);
cudaMalloc((void**) &device_new_bmp_img, num_pixel);
cudaMalloc((void **) &device_black_cell_count, sizeof(uint32_t));
// Initialize the array to 0.
for (int i = 0 ;i < num_pixel; i++) {
host_new_bmp_img[i] = 0;
}
// copy it to cuda mem.
cudaMemcpy(device_bmp_data, host_bmp_data, num_pixel, cudaMemcpyHostToDevice);
cudaMemcpy(device_new_bmp_img, host_new_bmp_img, num_pixel, cudaMemcpyHostToDevice);
uint32_t threshold = 0;
uint32_t black_cell_count = 0;
dim3 threadsPerBlock(THREADS_X, THREADS_Y);
dim3 blocksPerGrid((ht/THREADS_X) + 1, (wd/THREADS_Y) + 1);
//Convergence loop
while (black_cell_count < (75 * wd * ht/100))
{
black_cell_count = 0;
cudaMemcpy(device_black_cell_count, &black_cell_count, sizeof(uint32_t),cudaMemcpyHostToDevice);
threshold += 1;
// Call cuda kernel.
compute_on_device <<< blocksPerGrid, threadsPerBlock >>> (device_bmp_data,
device_new_bmp_img, device_black_cell_count, threshold, wd, ht);
cudaMemcpy(host_new_bmp_img, device_new_bmp_img, num_pixel, cudaMemcpyDeviceToHost);
cudaMemcpy(&black_cell_count, device_black_cell_count, sizeof(uint32_t), cudaMemcpyDeviceToHost);
}
img1.write_bmp_file(cuda_out_file, host_new_bmp_img);
// Free all the memory.
free(host_bmp_data);
free(host_new_bmp_img);
cudaFree(device_bmp_data);
cudaFree(device_new_bmp_img);
cudaFree(device_black_cell_count);
return threshold;
}
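/*
 * Added note: the kernel launches above are not error-checked. A minimal,
 * hypothetical sketch (not part of the original lab code) of a check that
 * could be placed right after the compute_on_device<<<...>>> launch:
 *
 *   cudaError_t err = cudaGetLastError();
 *   if (err != cudaSuccess) {
 *       fprintf(stderr, "sobel kernel launch failed: %s\n", cudaGetErrorString(err));
 *       exit(EXIT_FAILURE);
 *   }
 *
 * The blocking cudaMemcpy calls that follow the launch already wait for the
 * kernel to finish, so no extra cudaDeviceSynchronize() is needed before
 * reading black_cell_count back.
 */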
/*
* Serial function.
* in_file - The input image file.
* cuda_out_file- The output image file.
* Returns the threshold value at convergence.
*/
int serial_processing(FILE *in_file, FILE *serial_out_file) {
bmp_image img1;
uint8_t *bmp_data = (uint8_t *) img1.read_bmp_file(in_file);
//Allocate new output buffer of same size
uint8_t* new_bmp_img = (uint8_t*)malloc(img1.num_pixel);
// Initialize the array to 0.
for (int i = 0 ;i < img1.num_pixel; i++) {
new_bmp_img[i] = 0;
}
//Get image attributes
uint32_t wd = img1.image_width;
uint32_t ht = img1.image_height;
//Convergence loop
uint32_t threshold = 0;
uint32_t black_cell_count = 0;
// Serial version
while(black_cell_count < (75*wd*ht/100))
{
black_cell_count = 0;
threshold += 1;
for(int i=1; i < (ht-1); i++)
{
for(int j=1; j < (wd-1); j++)
{
float Gx = bmp_data[ (i-1)*wd + (j+1) ] - bmp_data[ (i-1)*wd + (j-1) ]
+ 2*bmp_data[ (i)*wd + (j+1) ] - 2*bmp_data[ (i)*wd + (j-1) ]
+ bmp_data[ (i+1)*wd + (j+1) ] - bmp_data[ (i+1)*wd + (j-1) ];
float Gy = bmp_data[ (i-1)*wd + (j-1) ] + 2*bmp_data[ (i-1)*wd + (j) ]
+ bmp_data[ (i-1)*wd + (j+1) ] - bmp_data[ (i+1)*wd + (j-1) ]
- 2*bmp_data[ (i+1)*wd + (j) ] - bmp_data[ (i+1)*wd + (j+1) ];
float mag = sqrt(Gx * Gx + Gy * Gy);
if(mag > threshold) {
new_bmp_img[ i * wd + j] = 255;
} else {
new_bmp_img[ i * wd + j] = 0;
black_cell_count++;
}
}
}
}
img1.write_bmp_file(serial_out_file, new_bmp_img);
free(bmp_data);
free(new_bmp_img);
return threshold;
}
int main(int argc, char* argv[]) {
struct timespec start, end;
FILE *in_file = fopen(argv[1], "rb");
FILE *serial_out_file = fopen(argv[2], "wb");
FILE *cuda_out_file = fopen(argv[3], "wb");
printf("\n**************************************************\n");
// Serial version
printf("\n** Serial version **\n\n");
clock_gettime(CLOCK_REALTIME, &start);
uint32_t serial_threshold = serial_processing(in_file, serial_out_file);
clock_gettime(CLOCK_REALTIME, &end);
double time_taken_serial = ((double)end.tv_sec + 1.0e-9*end.tv_nsec) -
((double)start.tv_sec + 1.0e-9*start.tv_nsec);
printf("\nTime taken for serial sobel operation: %.5f sec",time_taken_serial);
printf("\nThreshold during convergence: %d", serial_threshold );
printf("\n\n** CUDA version **\n\n");
// Cuda version
clock_gettime(CLOCK_REALTIME, &start);
uint32_t cuda_threshold = cuda_processing(in_file, cuda_out_file);
clock_gettime(CLOCK_REALTIME, &end);
double time_taken_cuda = ((double)end.tv_sec + 1.0e-9*end.tv_nsec) -
((double)start.tv_sec + 1.0e-9*start.tv_nsec);
// Print the result of cuda version.
printf("\nTime taken for CUDA sobel operation: %.5f sec",time_taken_cuda);
printf("\nThreshold during convergence: %d", cuda_threshold );
printf("\n**************************************************\n\n");
fclose(in_file);
fclose(serial_out_file);
fclose(cuda_out_file);
return 0;
}
|
1c62248421fed5c436030df5d05ce0edb2a6aeee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Parallel bitonic sort using CUDA.
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
* Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 256 // 2^8
#define BLOCKS 1 // 2^0; THREADS*BLOCKS must stay a power of two
#define NUM_VALS THREADS*BLOCKS
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
float random_float()
{
return (float)rand()/(float)RAND_MAX;
}
void array_print(float *arr, int length)
{
int i;
for (i = 0; i < length; ++i) {
printf("%1.3f ", arr[i]);
}
printf("\n");
}
void array_fill(float *arr, int length)
{
srand(time(NULL));
int i;
for (i = 0; i < length; ++i) {
arr[i] = random_float();
}
}
__global__ void bitonic_sort_step(float *dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
if (dev_values[i]>dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i&k)!=0) {
/* Sort descending */
if (dev_values[i]<dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
/**
* Inplace bitonic sort using CUDA.
*/
void bitonic_sort(float *values)
{
float *dev_values;
size_t size = NUM_VALS * sizeof(float);
hipMalloc((void**) &dev_values, size);
hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
dim3 blocks(BLOCKS,1); /* Number of blocks */
dim3 threads(THREADS,1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= NUM_VALS; k <<= 1) {
/* Minor step */
for (j=k>>1; j>0; j=j>>1) {
hipLaunchKernelGGL(( bitonic_sort_step), dim3(blocks), dim3(threads), 0, 0, dev_values, j, k);
}
}
hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
hipFree(dev_values);
}
int main(void)
{
clock_t start, stop;
float *values = (float*) malloc( NUM_VALS * sizeof(float));
array_fill(values, NUM_VALS);
start = clock();
bitonic_sort(values); /* Inplace */
stop = clock(); /* stop timing before printing so only the sort is measured */
for(int i=0; i<NUM_VALS; i++)
printf("%d\t%f\n", i, values[i]);
print_elapsed(start, stop);
free(values);
}
|
1c62248421fed5c436030df5d05ce0edb2a6aeee.cu
|
/*
* Parallel bitonic sort using CUDA.
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
* Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 256 // 2^8
#define BLOCKS 1 // 2^0; THREADS*BLOCKS must stay a power of two
#define NUM_VALS THREADS*BLOCKS
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
float random_float()
{
return (float)rand()/(float)RAND_MAX;
}
void array_print(float *arr, int length)
{
int i;
for (i = 0; i < length; ++i) {
printf("%1.3f ", arr[i]);
}
printf("\n");
}
void array_fill(float *arr, int length)
{
srand(time(NULL));
int i;
for (i = 0; i < length; ++i) {
arr[i] = random_float();
}
}
__global__ void bitonic_sort_step(float *dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
if (dev_values[i]>dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i&k)!=0) {
/* Sort descending */
if (dev_values[i]<dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
/**
* Inplace bitonic sort using CUDA.
*/
void bitonic_sort(float *values)
{
float *dev_values;
size_t size = NUM_VALS * sizeof(float);
cudaMalloc((void**) &dev_values, size);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
dim3 blocks(BLOCKS,1); /* Number of blocks */
dim3 threads(THREADS,1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= NUM_VALS; k <<= 1) {
/* Minor step */
for (j=k>>1; j>0; j=j>>1) {
bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k);
}
}
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
}
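/*
 * Added note: each (k, j) pair above is issued as a separate kernel launch on
 * the default stream, so the in-order execution of successive launches is what
 * sequences the compare-exchange phases; no grid-wide synchronization inside
 * the kernel is required. The network assumes NUM_VALS (THREADS*BLOCKS) is a
 * power of two, since sorting partners are computed as i^j.
 */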
int main(void)
{
clock_t start, stop;
float *values = (float*) malloc( NUM_VALS * sizeof(float));
array_fill(values, NUM_VALS);
start = clock();
bitonic_sort(values); /* Inplace */
stop = clock(); /* stop timing before printing so only the sort is measured */
for(int i=0; i<NUM_VALS; i++)
printf("%d\t%f\n", i, values[i]);
print_elapsed(start, stop);
free(values);
}
|
ec95d63870949049ad4f313b2c1aa9cd52943321.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from dgemm_tesla_N_N_special.cu normal d -> s, Fri Jul 18 17:34:13 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
float alpha,
const float* __restrict__ b,
float* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This kernel is for matrices divisible by the corresponding
blocking sizes.
@ingroup magma_sblas3
********************************************************************/
__global__ void
sgemm_kernel_N_N_64_16_16_16_4_special(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
B += tx + __mul24(iby+ty, ldb);
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
const float *Bend = B + k;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
m = 2*lda;
n = 3*lda;
do {
//float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
float Ab[4] = {A[0], A[lda], A[m], A[n]};
__shared__ float Bb[16][17];
Bb[tx][ty+0 ] = B[0];
Bb[tx][ty+4 ] = B[4*ldb];
Bb[tx][ty+8 ] = B[8*ldb];
Bb[tx][ty+12] = B[12*ldb];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
#pragma unroll 16
for(int i=0; i < 16; i++) {
C[0] = alpha * Cb[i] + beta * C[0];
C += ldc;
}
}
extern "C" void
magmablas_sgemm_N_N_64_16_16_16_4_special(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( m/64, n/16 );
hipLaunchKernelGGL(( sgemm_kernel_N_N_64_16_16_16_4_special), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
ec95d63870949049ad4f313b2c1aa9cd52943321.cu
|
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from dgemm_tesla_N_N_special.cu normal d -> s, Fri Jul 18 17:34:13 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
float alpha,
const float* __restrict__ b,
float* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This kernel is for matrices divisible by the corresponding
blocking sizes.
@ingroup magma_sblas3
********************************************************************/
__global__ void
sgemm_kernel_N_N_64_16_16_16_4_special(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
B += tx + __mul24(iby+ty, ldb);
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
const float *Bend = B + k;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
m = 2*lda;
n = 3*lda;
do {
//float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
float Ab[4] = {A[0], A[lda], A[m], A[n]};
__shared__ float Bb[16][17];
Bb[tx][ty+0 ] = B[0];
Bb[tx][ty+4 ] = B[4*ldb];
Bb[tx][ty+8 ] = B[8*ldb];
Bb[tx][ty+12] = B[12*ldb];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[m];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[n];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
#pragma unroll 16
for(int i=0; i < 16; i++) {
C[0] = alpha * Cb[i] + beta * C[0];
C += ldc;
}
}
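/*
 * Added note: each thread block computes a 64x16 tile of C. The 16x4 = 64
 * threads each accumulate one row of that tile (16 partial sums) in the
 * register array Cb, while a 16x16 tile of B is staged in shared memory; the
 * extra column in Bb[16][17] is padding to avoid shared-memory bank conflicts.
 */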
extern "C" void
magmablas_sgemm_N_N_64_16_16_16_4_special(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( m/64, n/16 );
sgemm_kernel_N_N_64_16_16_16_4_special<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
9a69712362e34e0561ae1e93c671418fc38f27f6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/regularizers/no_regularizer.hpp"
#include "HugeCTR/include/utils.cuh"
#include <utility>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
NoRegularizer::NoRegularizer(const std::shared_ptr<GeneralBuffer<float>>& weight_buff,
const std::shared_ptr<GeneralBuffer<float>>& wgrad_buff,
const int batch_size, const int device_id)
: Regularizer(weight_buff, wgrad_buff, batch_size, device_id) {}
void NoRegularizer::do_compute_rterm(const float* weight, float* rterm, int num_elements,
hipStream_t stream) {
*rterm = 0.0f;
}
void NoRegularizer::do_initialize_wgrad(const float* weight, float* wgrad, int num_elements,
hipStream_t stream) {
int n_blocks = get_n_sms() * 4;
int block_size = 512;
hipLaunchKernelGGL(( initialize_array), dim3(n_blocks), dim3(block_size), 0, stream, wgrad, num_elements, 0.0f);
}
} // namespace HugeCTR
|
9a69712362e34e0561ae1e93c671418fc38f27f6.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/regularizers/no_regularizer.hpp"
#include "HugeCTR/include/utils.cuh"
#include <utility>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
NoRegularizer::NoRegularizer(const std::shared_ptr<GeneralBuffer<float>>& weight_buff,
const std::shared_ptr<GeneralBuffer<float>>& wgrad_buff,
const int batch_size, const int device_id)
: Regularizer(weight_buff, wgrad_buff, batch_size, device_id) {}
void NoRegularizer::do_compute_rterm(const float* weight, float* rterm, int num_elements,
cudaStream_t stream) {
*rterm = 0.0f;
}
void NoRegularizer::do_initialize_wgrad(const float* weight, float* wgrad, int num_elements,
cudaStream_t stream) {
int n_blocks = get_n_sms() * 4;
int block_size = 512;
initialize_array<<<n_blocks, block_size, 0, stream>>>(wgrad, num_elements, 0.0f);
}
} // namespace HugeCTR
|
0de4633a1cb9072af1eb4850dd498d1c50a4bd1b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <vector>
__global__ void init(int *a, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n) return;
a[i] = 0;
}
__global__ void packing(int *key, int *bucket, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n) return;
atomicAdd(&bucket[key[i]], 1);
}
__global__ void scan(int *a, int *b, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n) return;
for(int j = 1;j < n;j <<= 1){
b[i] = a[i];
__syncthreads();
if(i-j >= 0) a[i] += b[i-j];
__syncthreads();
}
}
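// Added note: scan() is a Hillis-Steele style inclusive prefix sum. It relies on
// __syncthreads(), so it is only correct when the whole array fits in a single
// block; that holds here because range (5) is far smaller than the block size m (1024).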
__global__ void unpacking(int *key, int *bucket, int n, int range){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n) return;
int top = range-1;
int bottom = -1;
int middle;
while(top-bottom > 1){
middle = (top+bottom)/2;
if(i >= bucket[middle]) bottom = middle;
else top = middle;
}
key[i] = top;
}
int main() {
int n = 50;
const int m = 1024;
int range = 5;
int *key;
hipMallocManaged(&key, n*sizeof(int));
for (int i=0; i<n; i++) {
key[i] = rand() % range;
printf("%d ",key[i]);
}
printf("\n");
int *bucket;
hipMallocManaged(&bucket, range*sizeof(int));
hipLaunchKernelGGL(( init), dim3((range+m-1)/m), dim3(m), 0, 0, bucket, range); // bucket has only 'range' entries; clearing n of them would write past the allocation
hipLaunchKernelGGL(( packing), dim3((n+m-1)/m), dim3(m), 0, 0, key, bucket, n);
int *scan_mem;
hipMallocManaged(&scan_mem, range*sizeof(int));
hipLaunchKernelGGL(( scan), dim3((range+m-1)/m), dim3(m), 0, 0, bucket, scan_mem, range);
hipLaunchKernelGGL(( unpacking), dim3((n+m-1)/m), dim3(m), 0, 0, key, bucket, n, range);
hipDeviceSynchronize();
for (int i=0; i<n; i++) {
printf("%d ",key[i]);
}
printf("\n");
}
|
0de4633a1cb9072af1eb4850dd498d1c50a4bd1b.cu
|
#include <cstdio>
#include <cstdlib>
#include <vector>
__global__ void init(int *a, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n) return;
a[i] = 0;
}
__global__ void packing(int *key, int *bucket, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n) return;
atomicAdd(&bucket[key[i]], 1);
}
__global__ void scan(int *a, int *b, int n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n) return;
for(int j = 1;j < n;j <<= 1){
b[i] = a[i];
__syncthreads();
if(i-j >= 0) a[i] += b[i-j];
__syncthreads();
}
}
__global__ void unpacking(int *key, int *bucket, int n, int range){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n) return;
int top = range-1;
int bottom = -1;
int middle;
while(top-bottom > 1){
middle = (top+bottom)/2;
if(i >= bucket[middle]) bottom = middle;
else top = middle;
}
key[i] = top;
}
int main() {
int n = 50;
const int m = 1024;
int range = 5;
int *key;
cudaMallocManaged(&key, n*sizeof(int));
for (int i=0; i<n; i++) {
key[i] = rand() % range;
printf("%d ",key[i]);
}
printf("\n");
int *bucket;
cudaMallocManaged(&bucket, range*sizeof(int));
init<<<(range+m-1)/m, m>>>(bucket, range); // bucket has only 'range' entries; clearing n of them would write past the allocation
packing<<<(n+m-1)/m, m>>>(key, bucket, n);
int *scan_mem;
cudaMallocManaged(&scan_mem, range*sizeof(int));
scan<<<(range+m-1)/m, m>>>(bucket, scan_mem, range);
unpacking<<<(n+m-1)/m, m>>>(key, bucket, n, range);
cudaDeviceSynchronize();
for (int i=0; i<n; i++) {
printf("%d ",key[i]);
}
printf("\n");
}
|
78246ea9284b08bf088cb1fb658dbab7ec1ce99e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Cuckoo Cycle, a memory-hard proof-of-work
// Copyright (c) 2013-2016 John Tromp
// The edge-trimming memory optimization is due to Dave Andersen
// http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html
#include <stdint.h>
#include <string.h>
#include "cuckoo.h"
#ifndef MAXSOLS
#define MAXSOLS 1
#endif
#define MAXINT ((1U<<31)-1)
#if SIZESHIFT <= 32
typedef u32 nonce_t;
typedef u32 node_t;
typedef uint2 edge_t;
#define make_edge make_uint2
#else
typedef u64 nonce_t;
typedef u64 node_t;
typedef ulong2 edge_t;
#define make_edge make_ulong2
#endif
#include <openssl/sha.h>
typedef unsigned long long ull;
static __device__ __forceinline__ bool operator== (edge_t a, edge_t b) { return a.x == b.x && a.y == b.y; }
// d(evice s)ipnode
#if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain
static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); }
static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) { a.x ^= b.x, a.y ^= b.y; }
static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) {
asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t"
: "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y));
}
#undef ROTL
__inline__ __device__ uint2 ROTL(const uint2 a, const int offset) {
uint2 result;
if (offset >= 32) {
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset));
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset));
} else {
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
}
return result;
}
__device__ __forceinline__ uint2 vectorize(const uint64_t x) {
uint2 result;
asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x));
return result;
}
__device__ __forceinline__ uint64_t devectorize(uint2 x) {
uint64_t result;
asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y));
return result;
}
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
uint2 nonce = vectorize(2*nce + uorv);
uint2 v0 = ctx.v2[0], v1 = ctx.v2[1], v2 = ctx.v2[2], v3 = ctx.v2[3] ^ nonce;
SIPROUND; SIPROUND;
v0 ^= nonce;
v2 ^= vectorize(0xff);
SIPROUND; SIPROUND; SIPROUND; SIPROUND;
return devectorize(v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#else
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
u64 nonce = 2*nce + uorv;
u64 v0 = ctx.v[0], v1 = ctx.v[1], v2 = ctx.v[2], v3 = ctx.v[3] ^ nonce;
SIPROUND; SIPROUND;
v0 ^= nonce;
v2 ^= 0xff;
SIPROUND; SIPROUND; SIPROUND; SIPROUND;
return (v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#endif
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <set>
// algorithm parameters
#ifndef PART_BITS
// #bits used to partition edge set processing to save memory
// a value of 0 does no partitioning and is fastest
// a value of 1 partitions in two, making twice_set the
// same size as shrinkingset at about 33% slowdown
// higher values are not that interesting
#define PART_BITS 0
#endif
#ifndef IDXSHIFT
// we want sizeof(cuckoo_hash) == sizeof(twice_set), so
// CUCKOO_SIZE * sizeof(u64) == TWICE_WORDS * sizeof(u32)
// CUCKOO_SIZE * 2 == TWICE_WORDS
// (SIZE >> IDXSHIFT) * 2 == 2 * ONCE_BITS / 32
// SIZE >> IDXSHIFT == HALFSIZE >> PART_BITS >> 5
// IDXSHIFT == 1 + PART_BITS + 5
#define IDXSHIFT (PART_BITS + 6)
#endif
// grow with cube root of size, hardly affected by trimming
#ifndef MAXPATHLEN
#define MAXPATHLEN (8 << (SIZESHIFT/3))
#endif
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// set that starts out full and gets reset by threads on disjoint words
class shrinkingset {
public:
u64 *bits;
__device__ void reset(nonce_t n) {
bits[n/64] |= 1LL << (n%64);
}
__device__ bool test(node_t n) const {
return !((bits[n/64] >> (n%64)) & 1);
}
__device__ u64 block(node_t n) const {
return ~bits[n/64];
}
};
#define PART_MASK ((1 << PART_BITS) - 1)
#define ONCE_BITS (HALFSIZE >> PART_BITS)
#define TWICE_WORDS ((2 * ONCE_BITS) / 32)
class twice_set {
public:
u32 *bits;
__device__ void reset() {
memset(bits, 0, TWICE_WORDS * sizeof(u32));
}
__device__ void set(node_t u) {
node_t idx = u/16;
u32 bit = 1 << (2 * (u%16));
u32 old = atomicOr(&bits[idx], bit);
u32 bit2 = bit<<1;
if ((old & (bit2|bit)) == bit) atomicOr(&bits[idx], bit2);
}
__device__ u32 test(node_t u) const {
return (bits[u/16] >> (2 * (u%16))) & 2;
}
};
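// Added note: twice_set keeps two bits per node: the low bit records "seen at
// least once" and the high bit "seen at least twice" (set() promotes it with a
// second atomicOr, saturating at two). test() reads only the "twice" bit, which
// is what kill_leaf_edges needs to keep edges whose endpoint has degree >= 2.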
#define CUCKOO_SIZE (SIZE >> IDXSHIFT)
#define CUCKOO_MASK (CUCKOO_SIZE - 1)
// number of (least significant) key bits that survives leftshift by SIZESHIFT
#define KEYBITS (64-SIZESHIFT)
#define KEYMASK ((1L << KEYBITS) - 1)
#define MAXDRIFT (1L << (KEYBITS - IDXSHIFT))
class cuckoo_hash {
public:
u64 *cuckoo;
u32 nset;
void set(node_t u, node_t oldv, node_t newv) {
u64 niew = (u64)u << SIZESHIFT | newv;
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
u64 old = cuckoo[ui];
if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) {
cuckoo[ui] = niew;
return;
}
}
}
__device__ bool dset(node_t u, node_t oldv, node_t newv) {
u64 old, exp = (oldv ? (u64)u << SIZESHIFT | oldv : 0), nuw = (u64)u << SIZESHIFT | newv;
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
old = atomicCAS((ull *)&cuckoo[ui], (ull)exp, (ull)nuw);
if (old == exp) {
return true;
}
if ((old >> SIZESHIFT) == (u & KEYMASK)) {
return false;
}
}
}
node_t operator[](node_t u) const {
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
u64 cu = cuckoo[ui];
if (!cu)
return 0;
if ((cu >> SIZESHIFT) == (u & KEYMASK)) {
assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT);
return (node_t)(cu & (SIZE-1));
}
}
}
__device__ node_t node(node_t u) const {
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
u64 cu = cuckoo[ui];
if (!cu)
return 0;
if ((cu >> SIZESHIFT) == (u & KEYMASK)) {
assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT);
return (node_t)(cu & (SIZE-1));
}
}
}
};
struct noncedge_t {
nonce_t nonce;
edge_t edge;
};
class cuckoo_ctx {
public:
siphash_ctx sip_ctx;
shrinkingset alive;
twice_set nonleaf;
cuckoo_hash cuckoo;
noncedge_t sols[MAXSOLS][PROOFSIZE];
u32 nsols;
nonce_t gpu_nonce_lim;
u32 nthreads;
cuckoo_ctx(const char* header, nonce_t gpulim, u32 n_threads) {
setheader(&sip_ctx, header);
gpu_nonce_lim = gpulim & ~0x3f; // need multiple of 64
nthreads = n_threads;
nsols = 0;
}
};
__global__ void count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) {
shrinkingset &alive = ctx->alive;
twice_set &nonleaf = ctx->nonleaf;
siphash_ctx sip_ctx = ctx->sip_ctx; // local copy sip context; 2.5% speed gain
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (nonce_t block = id*64; block < HALFSIZE; block += ctx->nthreads*64) {
u64 alive64 = alive.block(block);
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u = dipnode(sip_ctx, nonce, uorv);
if ((u & PART_MASK) == part) {
nonleaf.set(u >> PART_BITS);
}
}
}
}
__global__ void kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) {
shrinkingset &alive = ctx->alive;
twice_set &nonleaf = ctx->nonleaf;
siphash_ctx sip_ctx = ctx->sip_ctx;
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (nonce_t block = id*64; block < HALFSIZE; block += ctx->nthreads*64) {
u64 alive64 = alive.block(block);
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u = dipnode(sip_ctx, nonce, uorv);
if ((u & PART_MASK) == part) {
if (!nonleaf.test(u >> PART_BITS)) {
alive.reset(nonce);
}
}
}
}
}
__device__ u32 dpath(cuckoo_hash &cuckoo, node_t u, node_t *us) {
u32 nu;
for (nu = 0; u; u = cuckoo.node(u)) {
if (nu++ >= MAXPATHLEN) {
while (nu-- && us[nu] != u) ;
if (nu == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
return ~0;
}
us[nu] = u;
if (nu>=2 && u==us[nu-2])
return ~0;
}
us[nu+1] = 0;
return nu;
}
__global__ void find_cycles(cuckoo_ctx *ctx) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
node_t us[MAXPATHLEN+2], vs[MAXPATHLEN+2];
shrinkingset &alive = ctx->alive;
siphash_ctx sip_ctx = ctx->sip_ctx;
cuckoo_hash &cuckoo = ctx->cuckoo;
for (nonce_t block = id*64; block < ctx->gpu_nonce_lim; block += ctx->nthreads*64) {
u64 alive64 = alive.block(block);
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u0 = dipnode(sip_ctx, nonce, 0)<<1, v0 = dipnode(sip_ctx, nonce, 1)<<1|1;
if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[]
continue;
us[0] = u0; vs[0] = v0;
int nredo = 0;
redo: if (nredo++) printf("redo\n");
node_t u1 = cuckoo.node(u0), v1 = cuckoo.node(v0);
u32 nu, nv;
nonce_t u=u0;
for (nu = 0; u; u = cuckoo.node(u)) {
if (nu++ >= MAXPATHLEN) {
while (nu-- && us[nu] != u) ;
if (nu == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
break;
}
us[nu] = u;
if (nu>=2 && u==us[nu-2])
break;
}
if (u) {
//printf("oops\n");
continue;
}
us[nu+1] = 0;
nonce_t v=v0;
for (nv = 0; v; v = cuckoo.node(v)) {
if (nv++ >= MAXPATHLEN) {
while (nv-- && vs[nv] != v) ;
if (nv == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
break;
}
vs[nv] = v;
if (nv>=2 && v==vs[nv-2])
break;
}
if (v) {
//printf("oops\n");
continue;
}
vs[nv+1] = 0;
// u32 nu = dpath(cuckoo, u1, us), nv = dpath(cuckoo, v1, vs);
if (nu==~0 || nv==~0) continue;
if (us[nu] == vs[nv]) {
u32 min = nu < nv ? nu : nv;
for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ;
u32 len = nu + nv + 1;
printf("% 4d-cycle found at %d:%d%%\n", len, id, (u32)(nonce*100L/HALFSIZE));
if (len == PROOFSIZE) {
u32 slot = atomicInc(&ctx->nsols, MAXINT);
if (slot < MAXSOLS) {
noncedge_t *ne = &ctx->sols[slot][0];
ne++->edge = make_edge(*us, *vs);
while (nu--)
ne++->edge = make_edge(us[(nu + 1)&~1], us[nu | 1]); // u's in even position; v's in odd
while (nv--)
ne++->edge = make_edge(vs[nv | 1], vs[(nv + 1)&~1]); // u's in odd position; v's in even
}
}
continue;
}
if (nu < nv) {
while (nu--)
if (!cuckoo.dset(us[nu+1], us[nu+2], us[nu])) goto redo;
if (!cuckoo.dset(u0, u1, v0)) goto redo;
} else {
while (nv--)
if (!cuckoo.dset(vs[nv+1], vs[nv+2], vs[nv])) goto redo;
if (!cuckoo.dset(v0, v1, u0)) goto redo;
}
}
}
}
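/*
 * Added note: find_cycles walks the directed paths stored in the cuckoo hash
 * from both endpoints u0 and v0 of each surviving edge. If the two walks end at
 * the same root, a cycle of length nu+nv+1 has been found (and recorded when it
 * matches PROOFSIZE); otherwise the shorter path is reversed with dset(), whose
 * compare-and-swap failure triggers the "redo" retry when another thread raced.
 */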
u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) {
u32 nu;
for (nu = 0; u; u = cuckoo[u]) {
if (nu++ >= MAXPATHLEN) {
while (nu-- && us[nu] != u) ;
if (nu == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
return ~0;
}
us[nu] = u;
if (nu>=2 && u==us[nu-2])
return ~0;
}
us[nu+1] = 0;
return nu;
}
void find_more_cycles(cuckoo_ctx *ctx, cuckoo_hash &cuckoo, u64 *bits) {
node_t us[MAXPATHLEN+2], vs[MAXPATHLEN+2];
for (nonce_t block = ctx->gpu_nonce_lim; block < HALFSIZE; block += 64) {
u64 alive64 = ~bits[block/64];
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
// printf("nonce %d\n", nonce);
u32 ffs = __builtin_ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u0=sipnode(&ctx->sip_ctx, nonce, 0), v0=sipnode(&ctx->sip_ctx, nonce, 1);
if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[]
continue;
us[0] = u0; vs[0] = v0;
node_t u1 = cuckoo[u0], v1 = cuckoo[v0];
u32 nu = path(cuckoo, u1, us), nv = path(cuckoo, v1, vs);
if (nu==~0 || nv==~0) continue;
if (us[nu] == vs[nv]) {
u32 min = nu < nv ? nu : nv;
for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ;
u32 len = nu + nv + 1;
printf("% 4d-cycle found at 0:%d%%\n", len, (u32)(nonce*100L/HALFSIZE));
if (len == PROOFSIZE) {
u32 slot = ctx->nsols++;
if (slot < MAXSOLS) {
noncedge_t *ne = &ctx->sols[slot][0];
ne++->edge = make_edge(*us, *vs);
while (nu--)
ne++->edge = make_edge(us[(nu + 1)&~1], us[nu | 1]); // u's in even position; v's in odd
while (nv--)
ne++->edge = make_edge(vs[nv | 1], vs[(nv + 1)&~1]); // u's in odd position; v's in even
}
}
continue;
}
if (nu < nv) {
while (nu--)
cuckoo.set(us[nu+1], us[nu+2], us[nu]);
cuckoo.set(u0, u1, v0);
} else {
while (nv--)
cuckoo.set(vs[nv+1], vs[nv+2], vs[nv]);
cuckoo.set(v0, v1, u0);
}
if (ffs & 64) break; // can't shift by 64
}
}
}
__global__ void find_nonces(cuckoo_ctx *ctx) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
shrinkingset &alive = ctx->alive;
siphash_ctx sip_ctx = ctx->sip_ctx;
for (nonce_t block = id * 64; block < HALFSIZE; block += ctx->nthreads * 64) {
u64 alive64 = alive.block(block);
for (nonce_t nonce = block - 1; alive64;) { // -1 compensates for 1-based ffs
u32 ffs = __ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
edge_t edge = make_edge(dipnode(sip_ctx,nonce,0)<<1, dipnode(sip_ctx,nonce,1)<<1|1);
for (u32 i = 0; i < ctx->nsols; i++) {
noncedge_t *sol = ctx->sols[i];
for (u32 j = 0; j < PROOFSIZE; j++) {
if (sol[j].edge == edge)
sol[j].nonce = nonce;
}
}
}
}
}
int noncedge_cmp(const void *a, const void *b) {
return ((noncedge_t *)a)->nonce - ((noncedge_t *)b)->nonce;
}
#include <unistd.h>
int main(int argc, char **argv) {
int gpu_pct = 50;
int nthreads = 1;
int ntrims = 1 + (PART_BITS+3)*(PART_BITS+4)/2;
int tpb = 0;
const char *header = "";
int c;
while ((c = getopt (argc, argv, "h:m:n:g:t:p:")) != -1) {
switch (c) {
case 'h':
header = optarg;
break;
case 'n':
ntrims = atoi(optarg);
break;
case 'g':
gpu_pct = atoi(optarg);
break;
case 't':
nthreads = atoi(optarg);
break;
case 'p':
tpb = atoi(optarg);
break;
}
}
if (!tpb) // if not set, then default threads per block to roughly square root of threads
for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ;
printf("Looking for %d-cycle on cuckoo%d(\"%s\") with 50%% edges, %d trims, %d%% gpu, %d threads %d per block\n",
PROOFSIZE, SIZESHIFT, header, ntrims, gpu_pct, nthreads, tpb);
u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32);
nonce_t gpu_lim = HALFSIZE*gpu_pct/100 & ~0x3f;
cuckoo_ctx ctx(header, gpu_lim, nthreads);
checkCudaErrors(hipMalloc((void**)&ctx.alive.bits, edgeBytes));
checkCudaErrors(hipMemset(ctx.alive.bits, 0, edgeBytes));
checkCudaErrors(hipMalloc((void**)&ctx.nonleaf.bits, nodeBytes));
int edgeUnit=0, nodeUnit=0;
u64 eb = edgeBytes, nb = nodeBytes;
for (; eb >= 1024; eb>>=10) edgeUnit++;
for (; nb >= 1024; nb>>=10) nodeUnit++;
printf("Using %d%cB edge and %d%cB node memory.\n",
(int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]);
cuckoo_ctx *device_ctx;
checkCudaErrors(hipMalloc((void**)&device_ctx, sizeof(cuckoo_ctx)));
hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice);
for (u32 round=0; round < ntrims; round++) {
for (u32 uorv = 0; uorv < 2; uorv++) {
for (u32 part = 0; part <= PART_MASK; part++) {
checkCudaErrors(hipMemset(ctx.nonleaf.bits, 0, nodeBytes));
hipLaunchKernelGGL(( count_node_deg), dim3(nthreads/tpb),dim3(tpb), 0, 0, device_ctx, uorv, part);
hipLaunchKernelGGL(( kill_leaf_edges), dim3(nthreads/tpb),dim3(tpb), 0, 0, device_ctx, uorv, part);
}
}
}
u64 *bits;
bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64));
assert(bits != 0);
hipMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), hipMemcpyDeviceToHost);
u64 cnt = 0;
for (int i = 0; i < HALFSIZE/64; i++)
cnt += __builtin_popcountll(~bits[i]);
u32 load = (u32)(100 * cnt / CUCKOO_SIZE);
printf("final load %d%%\n", load);
if (load >= 90) {
printf("overloaded! exiting...");
exit(0);
}
checkCudaErrors(hipFree(ctx.nonleaf.bits));
u32 cuckooBytes = CUCKOO_SIZE * sizeof(u64);
checkCudaErrors(hipMalloc((void**)&ctx.cuckoo.cuckoo, cuckooBytes));
checkCudaErrors(hipMemset(ctx.cuckoo.cuckoo, 0, cuckooBytes));
hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( find_cycles), dim3(nthreads/tpb),dim3(tpb), 0, 0, device_ctx);
hipMemcpy(&ctx, device_ctx, sizeof(cuckoo_ctx), hipMemcpyDeviceToHost);
cuckoo_hash *cuckoo = new cuckoo_hash();
cuckoo->cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64));
assert(cuckoo->cuckoo != 0);
hipMemcpy(cuckoo->cuckoo, ctx.cuckoo.cuckoo, cuckooBytes, hipMemcpyDeviceToHost);
cnt = 0;
for (int i = 0; i < CUCKOO_SIZE; i++)
cnt += (cuckoo->cuckoo[i] != 0);
printf("%lu gpu edges\n", cnt);
find_more_cycles(&ctx, *cuckoo, bits);
free(cuckoo->cuckoo);
if (ctx.nsols) {
hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( find_nonces), dim3(nthreads/tpb), dim3(tpb), 0, 0, device_ctx);
hipMemcpy(&ctx, device_ctx, sizeof(cuckoo_ctx), hipMemcpyDeviceToHost);
for (u32 i = 0; i < ctx.nsols; i++) {
printf("Solution");
qsort(ctx.sols[i], PROOFSIZE, sizeof(noncedge_t), noncedge_cmp);
for (u32 j = 0; j < PROOFSIZE; j++)
printf(" %jx", (uintmax_t)ctx.sols[i][j].nonce);
printf("\n");
}
}
checkCudaErrors(hipFree(ctx.cuckoo.cuckoo));
checkCudaErrors(hipFree(ctx.alive.bits));
return 0;
}
|
78246ea9284b08bf088cb1fb658dbab7ec1ce99e.cu
|
// Cuckoo Cycle, a memory-hard proof-of-work
// Copyright (c) 2013-2016 John Tromp
// The edge-trimming memory optimization is due to Dave Andersen
// http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html
#include <stdint.h>
#include <string.h>
#include "cuckoo.h"
#ifndef MAXSOLS
#define MAXSOLS 1
#endif
#define MAXINT ((1U<<31)-1)
#if SIZESHIFT <= 32
typedef u32 nonce_t;
typedef u32 node_t;
typedef uint2 edge_t;
#define make_edge make_uint2
#else
typedef u64 nonce_t;
typedef u64 node_t;
typedef ulong2 edge_t;
#define make_edge make_ulong2
#endif
#include <openssl/sha.h>
typedef unsigned long long ull;
static __device__ __forceinline__ bool operator== (edge_t a, edge_t b) { return a.x == b.x && a.y == b.y; }
// d(evice s)ipnode
#if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain
static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); }
static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) { a.x ^= b.x, a.y ^= b.y; }
static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) {
asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t"
: "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y));
}
#undef ROTL
__inline__ __device__ uint2 ROTL(const uint2 a, const int offset) {
uint2 result;
if (offset >= 32) {
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset));
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset));
} else {
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
}
return result;
}
__device__ __forceinline__ uint2 vectorize(const uint64_t x) {
uint2 result;
asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x));
return result;
}
__device__ __forceinline__ uint64_t devectorize(uint2 x) {
uint64_t result;
asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y));
return result;
}
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
uint2 nonce = vectorize(2*nce + uorv);
uint2 v0 = ctx.v2[0], v1 = ctx.v2[1], v2 = ctx.v2[2], v3 = ctx.v2[3] ^ nonce;
SIPROUND; SIPROUND;
v0 ^= nonce;
v2 ^= vectorize(0xff);
SIPROUND; SIPROUND; SIPROUND; SIPROUND;
return devectorize(v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#else
__device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) {
u64 nonce = 2*nce + uorv;
u64 v0 = ctx.v[0], v1 = ctx.v[1], v2 = ctx.v[2], v3 = ctx.v[3] ^ nonce;
SIPROUND; SIPROUND;
v0 ^= nonce;
v2 ^= 0xff;
SIPROUND; SIPROUND; SIPROUND; SIPROUND;
return (v0 ^ v1 ^ v2 ^ v3) & NODEMASK;
}
#endif
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <set>
// algorithm parameters
#ifndef PART_BITS
// #bits used to partition edge set processing to save memory
// a value of 0 does no partitioning and is fastest
// a value of 1 partitions in two, making twice_set the
// same size as shrinkingset at about 33% slowdown
// higher values are not that interesting
#define PART_BITS 0
#endif
#ifndef IDXSHIFT
// we want sizeof(cuckoo_hash) == sizeof(twice_set), so
// CUCKOO_SIZE * sizeof(u64) == TWICE_WORDS * sizeof(u32)
// CUCKOO_SIZE * 2 == TWICE_WORDS
// (SIZE >> IDXSHIFT) * 2 == 2 * ONCE_BITS / 32
// SIZE >> IDXSHIFT == HALFSIZE >> PART_BITS >> 5
// IDXSHIFT == 1 + PART_BITS + 5
#define IDXSHIFT (PART_BITS + 6)
#endif
// grow with cube root of size, hardly affected by trimming
#ifndef MAXPATHLEN
#define MAXPATHLEN (8 << (SIZESHIFT/3))
#endif
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// set that starts out full and gets reset by threads on disjoint words
class shrinkingset {
public:
u64 *bits;
__device__ void reset(nonce_t n) {
bits[n/64] |= 1LL << (n%64);
}
__device__ bool test(node_t n) const {
return !((bits[n/64] >> (n%64)) & 1);
}
__device__ u64 block(node_t n) const {
return ~bits[n/64];
}
};
#define PART_MASK ((1 << PART_BITS) - 1)
#define ONCE_BITS (HALFSIZE >> PART_BITS)
#define TWICE_WORDS ((2 * ONCE_BITS) / 32)
class twice_set {
public:
u32 *bits;
__device__ void reset() {
memset(bits, 0, TWICE_WORDS * sizeof(u32));
}
__device__ void set(node_t u) {
node_t idx = u/16;
u32 bit = 1 << (2 * (u%16));
u32 old = atomicOr(&bits[idx], bit);
u32 bit2 = bit<<1;
if ((old & (bit2|bit)) == bit) atomicOr(&bits[idx], bit2);
}
__device__ u32 test(node_t u) const {
return (bits[u/16] >> (2 * (u%16))) & 2;
}
};
#define CUCKOO_SIZE (SIZE >> IDXSHIFT)
#define CUCKOO_MASK (CUCKOO_SIZE - 1)
// number of (least significant) key bits that survives leftshift by SIZESHIFT
#define KEYBITS (64-SIZESHIFT)
#define KEYMASK ((1L << KEYBITS) - 1)
#define MAXDRIFT (1L << (KEYBITS - IDXSHIFT))
class cuckoo_hash {
public:
u64 *cuckoo;
u32 nset;
void set(node_t u, node_t oldv, node_t newv) {
u64 niew = (u64)u << SIZESHIFT | newv;
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
u64 old = cuckoo[ui];
if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) {
cuckoo[ui] = niew;
return;
}
}
}
__device__ bool dset(node_t u, node_t oldv, node_t newv) {
u64 old, exp = (oldv ? (u64)u << SIZESHIFT | oldv : 0), nuw = (u64)u << SIZESHIFT | newv;
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
old = atomicCAS((ull *)&cuckoo[ui], (ull)exp, (ull)nuw);
if (old == exp) {
return true;
}
if ((old >> SIZESHIFT) == (u & KEYMASK)) {
return false;
}
}
}
node_t operator[](node_t u) const {
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
u64 cu = cuckoo[ui];
if (!cu)
return 0;
if ((cu >> SIZESHIFT) == (u & KEYMASK)) {
assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT);
return (node_t)(cu & (SIZE-1));
}
}
}
__device__ node_t node(node_t u) const {
for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) {
u64 cu = cuckoo[ui];
if (!cu)
return 0;
if ((cu >> SIZESHIFT) == (u & KEYMASK)) {
assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT);
return (node_t)(cu & (SIZE-1));
}
}
}
};
struct noncedge_t {
nonce_t nonce;
edge_t edge;
};
class cuckoo_ctx {
public:
siphash_ctx sip_ctx;
shrinkingset alive;
twice_set nonleaf;
cuckoo_hash cuckoo;
noncedge_t sols[MAXSOLS][PROOFSIZE];
u32 nsols;
nonce_t gpu_nonce_lim;
u32 nthreads;
cuckoo_ctx(const char* header, nonce_t gpulim, u32 n_threads) {
setheader(&sip_ctx, header);
gpu_nonce_lim = gpulim & ~0x3f; // need multiple of 64
nthreads = n_threads;
nsols = 0;
}
};
__global__ void count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) {
shrinkingset &alive = ctx->alive;
twice_set &nonleaf = ctx->nonleaf;
siphash_ctx sip_ctx = ctx->sip_ctx; // local copy sip context; 2.5% speed gain
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (nonce_t block = id*64; block < HALFSIZE; block += ctx->nthreads*64) {
u64 alive64 = alive.block(block);
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u = dipnode(sip_ctx, nonce, uorv);
if ((u & PART_MASK) == part) {
nonleaf.set(u >> PART_BITS);
}
}
}
}
__global__ void kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) {
shrinkingset &alive = ctx->alive;
twice_set &nonleaf = ctx->nonleaf;
siphash_ctx sip_ctx = ctx->sip_ctx;
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (nonce_t block = id*64; block < HALFSIZE; block += ctx->nthreads*64) {
u64 alive64 = alive.block(block);
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u = dipnode(sip_ctx, nonce, uorv);
if ((u & PART_MASK) == part) {
if (!nonleaf.test(u >> PART_BITS)) {
alive.reset(nonce);
}
}
}
}
}
__device__ u32 dpath(cuckoo_hash &cuckoo, node_t u, node_t *us) {
u32 nu;
for (nu = 0; u; u = cuckoo.node(u)) {
if (nu++ >= MAXPATHLEN) {
while (nu-- && us[nu] != u) ;
if (nu == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
return ~0;
}
us[nu] = u;
if (nu>=2 && u==us[nu-2])
return ~0;
}
us[nu+1] = 0;
return nu;
}
__global__ void find_cycles(cuckoo_ctx *ctx) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
node_t us[MAXPATHLEN+2], vs[MAXPATHLEN+2];
shrinkingset &alive = ctx->alive;
siphash_ctx sip_ctx = ctx->sip_ctx;
cuckoo_hash &cuckoo = ctx->cuckoo;
for (nonce_t block = id*64; block < ctx->gpu_nonce_lim; block += ctx->nthreads*64) {
u64 alive64 = alive.block(block);
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
u32 ffs = __ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u0 = dipnode(sip_ctx, nonce, 0)<<1, v0 = dipnode(sip_ctx, nonce, 1)<<1|1;
if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[]
continue;
us[0] = u0; vs[0] = v0;
int nredo = 0;
redo: if (nredo++) printf("redo\n");
node_t u1 = cuckoo.node(u0), v1 = cuckoo.node(v0);
u32 nu, nv;
nonce_t u=u0;
for (nu = 0; u; u = cuckoo.node(u)) {
if (nu++ >= MAXPATHLEN) {
while (nu-- && us[nu] != u) ;
if (nu == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
break;
}
us[nu] = u;
if (nu>=2 && u==us[nu-2])
break;
}
if (u) {
//printf("oops\n");
continue;
}
us[nu+1] = 0;
nonce_t v=v0;
for (nv = 0; v; v = cuckoo.node(v)) {
if (nv++ >= MAXPATHLEN) {
while (nv-- && vs[nv] != v) ;
if (nv == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
break;
}
vs[nv] = v;
if (nv>=2 && v==vs[nv-2])
break;
}
if (v) {
//printf("oops\n");
continue;
}
vs[nv+1] = 0;
// u32 nu = dpath(cuckoo, u1, us), nv = dpath(cuckoo, v1, vs);
if (nu==~0 || nv==~0) continue;
if (us[nu] == vs[nv]) {
u32 min = nu < nv ? nu : nv;
for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ;
u32 len = nu + nv + 1;
printf("% 4d-cycle found at %d:%d%%\n", len, id, (u32)(nonce*100L/HALFSIZE));
if (len == PROOFSIZE) {
u32 slot = atomicInc(&ctx->nsols, MAXINT);
if (slot < MAXSOLS) {
noncedge_t *ne = &ctx->sols[slot][0];
ne++->edge = make_edge(*us, *vs);
while (nu--)
ne++->edge = make_edge(us[(nu + 1)&~1], us[nu | 1]); // u's in even position; v's in odd
while (nv--)
ne++->edge = make_edge(vs[nv | 1], vs[(nv + 1)&~1]); // u's in odd position; v's in even
}
}
continue;
}
if (nu < nv) {
while (nu--)
if (!cuckoo.dset(us[nu+1], us[nu+2], us[nu])) goto redo;
if (!cuckoo.dset(u0, u1, v0)) goto redo;
} else {
while (nv--)
if (!cuckoo.dset(vs[nv+1], vs[nv+2], vs[nv])) goto redo;
if (!cuckoo.dset(v0, v1, u0)) goto redo;
}
}
}
}
u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) {
u32 nu;
for (nu = 0; u; u = cuckoo[u]) {
if (nu++ >= MAXPATHLEN) {
while (nu-- && us[nu] != u) ;
if (nu == ~0)
printf("maximum path length exceeded\n");
else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu);
return ~0;
}
us[nu] = u;
if (nu>=2 && u==us[nu-2])
return ~0;
}
us[nu+1] = 0;
return nu;
}
void find_more_cycles(cuckoo_ctx *ctx, cuckoo_hash &cuckoo, u64 *bits) {
node_t us[MAXPATHLEN+2], vs[MAXPATHLEN+2];
for (nonce_t block = ctx->gpu_nonce_lim; block < HALFSIZE; block += 64) {
u64 alive64 = ~bits[block/64];
for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs
// printf("nonce %d\n", nonce);
u32 ffs = __builtin_ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
node_t u0=sipnode(&ctx->sip_ctx, nonce, 0), v0=sipnode(&ctx->sip_ctx, nonce, 1);
if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[]
continue;
us[0] = u0; vs[0] = v0;
node_t u1 = cuckoo[u0], v1 = cuckoo[v0];
u32 nu = path(cuckoo, u1, us), nv = path(cuckoo, v1, vs);
if (nu==~0 || nv==~0) continue;
if (us[nu] == vs[nv]) {
u32 min = nu < nv ? nu : nv;
for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ;
u32 len = nu + nv + 1;
printf("% 4d-cycle found at 0:%d%%\n", len, (u32)(nonce*100L/HALFSIZE));
if (len == PROOFSIZE) {
u32 slot = ctx->nsols++;
if (slot < MAXSOLS) {
noncedge_t *ne = &ctx->sols[slot][0];
ne++->edge = make_edge(*us, *vs);
while (nu--)
ne++->edge = make_edge(us[(nu + 1)&~1], us[nu | 1]); // u's in even position; v's in odd
while (nv--)
ne++->edge = make_edge(vs[nv | 1], vs[(nv + 1)&~1]); // u's in odd position; v's in even
}
}
continue;
}
if (nu < nv) {
while (nu--)
cuckoo.set(us[nu+1], us[nu+2], us[nu]);
cuckoo.set(u0, u1, v0);
} else {
while (nv--)
cuckoo.set(vs[nv+1], vs[nv+2], vs[nv]);
cuckoo.set(v0, v1, u0);
}
if (ffs & 64) break; // can't shift by 64
}
}
}
__global__ void find_nonces(cuckoo_ctx *ctx) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
shrinkingset &alive = ctx->alive;
siphash_ctx sip_ctx = ctx->sip_ctx;
for (nonce_t block = id * 64; block < HALFSIZE; block += ctx->nthreads * 64) {
u64 alive64 = alive.block(block);
for (nonce_t nonce = block - 1; alive64;) { // -1 compensates for 1-based ffs
u32 ffs = __ffsll(alive64);
nonce += ffs; alive64 >>= ffs;
edge_t edge = make_edge(dipnode(sip_ctx,nonce,0)<<1, dipnode(sip_ctx,nonce,1)<<1|1);
for (u32 i = 0; i < ctx->nsols; i++) {
noncedge_t *sol = ctx->sols[i];
for (u32 j = 0; j < PROOFSIZE; j++) {
if (sol[j].edge == edge)
sol[j].nonce = nonce;
}
}
}
}
}
int noncedge_cmp(const void *a, const void *b) {
return ((noncedge_t *)a)->nonce - ((noncedge_t *)b)->nonce;
}
#include <unistd.h>
int main(int argc, char **argv) {
int gpu_pct = 50;
int nthreads = 1;
int ntrims = 1 + (PART_BITS+3)*(PART_BITS+4)/2;
int tpb = 0;
const char *header = "";
int c;
while ((c = getopt (argc, argv, "h:m:n:g:t:p:")) != -1) {
switch (c) {
case 'h':
header = optarg;
break;
case 'n':
ntrims = atoi(optarg);
break;
case 'g':
gpu_pct = atoi(optarg);
break;
case 't':
nthreads = atoi(optarg);
break;
case 'p':
tpb = atoi(optarg);
break;
}
}
if (!tpb) // if not set, then default threads per block to roughly square root of threads
for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ;
printf("Looking for %d-cycle on cuckoo%d(\"%s\") with 50%% edges, %d trims, %d%% gpu, %d threads %d per block\n",
PROOFSIZE, SIZESHIFT, header, ntrims, gpu_pct, nthreads, tpb);
u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32);
nonce_t gpu_lim = HALFSIZE*gpu_pct/100 & ~0x3f;
cuckoo_ctx ctx(header, gpu_lim, nthreads);
checkCudaErrors(cudaMalloc((void**)&ctx.alive.bits, edgeBytes));
checkCudaErrors(cudaMemset(ctx.alive.bits, 0, edgeBytes));
checkCudaErrors(cudaMalloc((void**)&ctx.nonleaf.bits, nodeBytes));
int edgeUnit=0, nodeUnit=0;
u64 eb = edgeBytes, nb = nodeBytes;
for (; eb >= 1024; eb>>=10) edgeUnit++;
for (; nb >= 1024; nb>>=10) nodeUnit++;
printf("Using %d%cB edge and %d%cB node memory.\n",
(int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]);
cuckoo_ctx *device_ctx;
checkCudaErrors(cudaMalloc((void**)&device_ctx, sizeof(cuckoo_ctx)));
cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice);
for (u32 round=0; round < ntrims; round++) {
for (u32 uorv = 0; uorv < 2; uorv++) {
for (u32 part = 0; part <= PART_MASK; part++) {
checkCudaErrors(cudaMemset(ctx.nonleaf.bits, 0, nodeBytes));
count_node_deg<<<nthreads/tpb,tpb>>>(device_ctx, uorv, part);
kill_leaf_edges<<<nthreads/tpb,tpb>>>(device_ctx, uorv, part);
}
}
}
u64 *bits;
bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64));
assert(bits != 0);
cudaMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), cudaMemcpyDeviceToHost);
u64 cnt = 0;
for (int i = 0; i < HALFSIZE/64; i++)
cnt += __builtin_popcountll(~bits[i]);
u32 load = (u32)(100 * cnt / CUCKOO_SIZE);
printf("final load %d%%\n", load);
if (load >= 90) {
printf("overloaded! exiting...");
exit(0);
}
checkCudaErrors(cudaFree(ctx.nonleaf.bits));
u32 cuckooBytes = CUCKOO_SIZE * sizeof(u64);
checkCudaErrors(cudaMalloc((void**)&ctx.cuckoo.cuckoo, cuckooBytes));
checkCudaErrors(cudaMemset(ctx.cuckoo.cuckoo, 0, cuckooBytes));
cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice);
find_cycles<<<nthreads/tpb,tpb>>>(device_ctx);
cudaMemcpy(&ctx, device_ctx, sizeof(cuckoo_ctx), cudaMemcpyDeviceToHost);
cuckoo_hash *cuckoo = new cuckoo_hash();
cuckoo->cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64));
assert(cuckoo->cuckoo != 0);
cudaMemcpy(cuckoo->cuckoo, ctx.cuckoo.cuckoo, cuckooBytes, cudaMemcpyDeviceToHost);
cnt = 0;
for (int i = 0; i < CUCKOO_SIZE; i++)
cnt += (cuckoo->cuckoo[i] != 0);
printf("%lu gpu edges\n", cnt);
find_more_cycles(&ctx, *cuckoo, bits);
free(cuckoo->cuckoo);
if (ctx.nsols) {
cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice);
find_nonces<<<nthreads/tpb, tpb>>>(device_ctx);
cudaMemcpy(&ctx, device_ctx, sizeof(cuckoo_ctx), cudaMemcpyDeviceToHost);
for (u32 i = 0; i < ctx.nsols; i++) {
printf("Solution");
qsort(ctx.sols[i], PROOFSIZE, sizeof(noncedge_t), noncedge_cmp);
for (u32 j = 0; j < PROOFSIZE; j++)
printf(" %jx", (uintmax_t)ctx.sols[i][j].nonce);
printf("\n");
}
}
checkCudaErrors(cudaFree(ctx.cuckoo.cuckoo));
checkCudaErrors(cudaFree(ctx.alive.bits));
return 0;
}
|
6d1d325d7e22c87e4f1aa49e1427a23b6df0a349.hip
|
// !!! This is a file automatically generated by hipify!!!
// ******************************************
// implicit time stepping implementation of 2D diffusion problem
// Ben Cumming, CSCS
// *****************************************
// A small benchmark app that solves the 2D fisher equation using second-order
// finite differences.
// Syntax: ./main nx ny nt t
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <omp.h>
#include "data.h"
#include "linalg.h"
#include "operators.h"
#include "stats.h"
using namespace data;
using namespace linalg;
using namespace operators;
using namespace stats;
// read command line arguments
static void readcmdline(Discretization& options, int argc, char* argv[])
{
if (argc<5 || argc>6 ) {
std::cerr << "Usage: main nx ny nt t\n";
std::cerr << " nx number of gridpoints in x-direction\n";
std::cerr << " ny number of gridpoints in y-direction\n";
std::cerr << " nt number of timesteps\n";
std::cerr << " t total time\n";
std::cerr << " v [optional] turn on verbose output\n";
exit(1);
}
// read nx
options.nx = atoi(argv[1]);
if (options.nx < 1) {
std::cerr << "nx must be positive integer\n";
exit(-1);
}
// read ny
options.ny = atoi(argv[2]);
if (options.ny < 1) {
std::cerr << "ny must be positive integer\n";
exit(-1);
}
options.N = options.nx*options.ny;
// read nt
options.nt = atoi(argv[3]);
if (options.nt < 1) {
std::cerr << "nt must be positive integer\n";
exit(-1);
}
// read total time
double t = atof(argv[4]);
if (t < 0) {
std::cerr << "t must be positive real value\n";
exit(-1);
}
verbose_output = false;
if( argc==6 ) {
verbose_output = true;
}
// compute timestep size
options.dt = t / options.nt;
// compute the distance between grid points
// assume that x dimension has length 1.0
options.dx = 1. / (options.nx - 1);
// set alpha, assume diffusion coefficient D is 1
options.alpha = (options.dx * options.dx) / (1. * options.dt);
}
// ==============================================================================
int main(int argc, char* argv[])
{
// read command line arguments
readcmdline(options, argc, argv);
int nx = options.nx;
int ny = options.ny;
int nt = options.nt;
// initialize cuda
int device_count;
cuda_check_status( hipGetDeviceCount(&device_count) );
if(device_count < 1) {
std::cerr << "error: there should be at least one device per node" << std::endl;
exit(-1);
}
cuda_check_status( hipSetDevice(0) );
// get the cublas handle to force cublas initialization outside the main time
// stepping loop, to ensure that the timing doesn't count initialization costs
auto handle = cublas_handle();
// set iteration parameters
int max_cg_iters = 200;
int max_newton_iters = 50;
double tolerance = 1.e-6;
std::cout << "========================================================================" << std::endl;
std::cout << " Welcome to mini-stencil!" << std::endl;
std::cout << "version :: C++ with CUDA" << std::endl;
std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl;
std::cout << "time :: " << nt << " time steps from 0 .. " << options.nt*options.dt << std::endl;;
std::cout << "iteration :: " << "CG " << max_cg_iters
<< ", Newton " << max_newton_iters
<< ", tolerance " << tolerance << std::endl;;
std::cout << "========================================================================" << std::endl;
// allocate global fields
x_new.init(nx,ny);
x_old.init(nx,ny);
bndN.init(nx,1);
bndS.init(nx,1);
bndE.init(ny,1);
bndW.init(ny,1);
Field b(nx,ny);
Field deltax(nx,ny);
// set dirichlet boundary conditions to 0 all around
ss_fill(bndN, 0.);
ss_fill(bndS, 0.);
ss_fill(bndE, 0.);
ss_fill(bndW, 0.);
// set the initial condition
// a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius
// no larger than 1/8 of both xdim and ydim
ss_fill(x_new, 0.);
double xc = 1.0 / 4.0;
double yc = (ny - 1) * options.dx / 4;
double radius = fmin(xc, yc) / 2.0;
for (int j = 0; j < ny; j++)
{
double y = (j - 1) * options.dx;
for (int i = 0; i < nx; i++)
{
double x = (i - 1) * options.dx;
if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius)
x_new[i+nx*j] = 0.1;
}
}
// TODO : ensure that the gpu copy of x_new has the up to date values that were just created
x_new.update_device();
flops_bc = 0;
flops_diff = 0;
flops_blas1 = 0;
iters_cg = 0;
iters_newton = 0;
// start timer
double timespent = -omp_get_wtime();
// main timeloop
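    // Each implicit timestep solves the nonlinear system F(x_new) = 0 with
    // Newton's method: diffusion() evaluates the residual b, ss_cg() solves the
    // linearized system for deltax, and ss_axpy() applies the Newton update.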
for (int timestep = 1; timestep <= nt; timestep++)
{
// set x_new and x_old to be the solution
ss_copy(x_old, x_new);
double residual;
bool converged = false;
int it;
for (it=0; it<max_newton_iters; it++)
{
// compute residual : requires both x_new and x_old
diffusion(x_new, b);
residual = ss_norm2(b);
// check for convergence
if (residual < tolerance)
{
converged = true;
break;
}
// solve linear system to get -deltax
bool cg_converged = false;
ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged);
// check that the CG solver converged
if (!cg_converged) break;
// update solution
ss_axpy(x_new, -1.0, deltax);
}
iters_newton += it+1;
// output some statistics
if (converged && verbose_output) {
std::cout << "step " << timestep
<< " required " << it
<< " iterations for residual " << residual
<< std::endl;
}
if (!converged) {
std::cerr << "step " << timestep
<< " ERROR : nonlinear iterations failed to converge" << std::endl;;
break;
}
}
// get times
timespent += omp_get_wtime();
////////////////////////////////////////////////////////////////////
// write final solution to BOV file for visualization
////////////////////////////////////////////////////////////////////
// binary data
    FILE* output = fopen("output.bin", "wb");
x_new.update_host();
fwrite(x_new.host_data(), sizeof(double), nx * ny, output);
fclose(output);
// meta data
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << options.nx << " " << options.ny << " 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl;
    // print table summarizing results
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "simulation took " << timespent << " seconds" << std::endl;
std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of "
<< float(iters_cg)/timespent << " iters/second" << std::endl;
std::cout << iters_newton << " newton iterations" << std::endl;
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "Goodbye!" << std::endl;
return 0;
}
|
6d1d325d7e22c87e4f1aa49e1427a23b6df0a349.cu
|
// ******************************************
// implicit time stepping implementation of 2D diffusion problem
// Ben Cumming, CSCS
// *****************************************
// A small benchmark app that solves the 2D fisher equation using second-order
// finite differences.
// Syntax: ./main nx ny nt t
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <omp.h>
#include "data.h"
#include "linalg.h"
#include "operators.h"
#include "stats.h"
using namespace data;
using namespace linalg;
using namespace operators;
using namespace stats;
// read command line arguments
static void readcmdline(Discretization& options, int argc, char* argv[])
{
if (argc<5 || argc>6 ) {
std::cerr << "Usage: main nx ny nt t\n";
std::cerr << " nx number of gridpoints in x-direction\n";
std::cerr << " ny number of gridpoints in y-direction\n";
std::cerr << " nt number of timesteps\n";
std::cerr << " t total time\n";
std::cerr << " v [optional] turn on verbose output\n";
exit(1);
}
// read nx
options.nx = atoi(argv[1]);
if (options.nx < 1) {
std::cerr << "nx must be positive integer\n";
exit(-1);
}
// read ny
options.ny = atoi(argv[2]);
if (options.ny < 1) {
std::cerr << "ny must be positive integer\n";
exit(-1);
}
options.N = options.nx*options.ny;
// read nt
options.nt = atoi(argv[3]);
if (options.nt < 1) {
std::cerr << "nt must be positive integer\n";
exit(-1);
}
// read total time
double t = atof(argv[4]);
if (t < 0) {
std::cerr << "t must be positive real value\n";
exit(-1);
}
verbose_output = false;
if( argc==6 ) {
verbose_output = true;
}
// compute timestep size
options.dt = t / options.nt;
// compute the distance between grid points
// assume that x dimension has length 1.0
options.dx = 1. / (options.nx - 1);
// set alpha, assume diffusion coefficient D is 1
options.alpha = (options.dx * options.dx) / (1. * options.dt);
}
// ==============================================================================
int main(int argc, char* argv[])
{
// read command line arguments
readcmdline(options, argc, argv);
int nx = options.nx;
int ny = options.ny;
int nt = options.nt;
// initialize cuda
int device_count;
cuda_check_status( cudaGetDeviceCount(&device_count) );
if(device_count < 1) {
std::cerr << "error: there should be at least one device per node" << std::endl;
exit(-1);
}
cuda_check_status( cudaSetDevice(0) );
// get the cublas handle to force cublas initialization outside the main time
// stepping loop, to ensure that the timing doesn't count initialization costs
auto handle = cublas_handle();
// set iteration parameters
int max_cg_iters = 200;
int max_newton_iters = 50;
double tolerance = 1.e-6;
std::cout << "========================================================================" << std::endl;
std::cout << " Welcome to mini-stencil!" << std::endl;
std::cout << "version :: C++ with CUDA" << std::endl;
std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl;
std::cout << "time :: " << nt << " time steps from 0 .. " << options.nt*options.dt << std::endl;;
std::cout << "iteration :: " << "CG " << max_cg_iters
<< ", Newton " << max_newton_iters
<< ", tolerance " << tolerance << std::endl;;
std::cout << "========================================================================" << std::endl;
// allocate global fields
x_new.init(nx,ny);
x_old.init(nx,ny);
bndN.init(nx,1);
bndS.init(nx,1);
bndE.init(ny,1);
bndW.init(ny,1);
Field b(nx,ny);
Field deltax(nx,ny);
// set dirichlet boundary conditions to 0 all around
ss_fill(bndN, 0.);
ss_fill(bndS, 0.);
ss_fill(bndE, 0.);
ss_fill(bndW, 0.);
// set the initial condition
// a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius
// no larger than 1/8 of both xdim and ydim
ss_fill(x_new, 0.);
double xc = 1.0 / 4.0;
double yc = (ny - 1) * options.dx / 4;
double radius = fmin(xc, yc) / 2.0;
for (int j = 0; j < ny; j++)
{
double y = (j - 1) * options.dx;
for (int i = 0; i < nx; i++)
{
double x = (i - 1) * options.dx;
if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius)
x_new[i+nx*j] = 0.1;
}
}
// TODO : ensure that the gpu copy of x_new has the up to date values that were just created
x_new.update_device();
flops_bc = 0;
flops_diff = 0;
flops_blas1 = 0;
iters_cg = 0;
iters_newton = 0;
// start timer
double timespent = -omp_get_wtime();
// main timeloop
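    // Each implicit timestep solves the nonlinear system F(x_new) = 0 with
    // Newton's method: diffusion() evaluates the residual b, ss_cg() solves the
    // linearized system for deltax, and ss_axpy() applies the Newton update.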
for (int timestep = 1; timestep <= nt; timestep++)
{
// set x_new and x_old to be the solution
ss_copy(x_old, x_new);
double residual;
bool converged = false;
int it;
for (it=0; it<max_newton_iters; it++)
{
// compute residual : requires both x_new and x_old
diffusion(x_new, b);
residual = ss_norm2(b);
// check for convergence
if (residual < tolerance)
{
converged = true;
break;
}
// solve linear system to get -deltax
bool cg_converged = false;
ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged);
// check that the CG solver converged
if (!cg_converged) break;
// update solution
ss_axpy(x_new, -1.0, deltax);
}
iters_newton += it+1;
// output some statistics
if (converged && verbose_output) {
std::cout << "step " << timestep
<< " required " << it
<< " iterations for residual " << residual
<< std::endl;
}
if (!converged) {
std::cerr << "step " << timestep
<< " ERROR : nonlinear iterations failed to converge" << std::endl;;
break;
}
}
// get times
timespent += omp_get_wtime();
////////////////////////////////////////////////////////////////////
// write final solution to BOV file for visualization
////////////////////////////////////////////////////////////////////
// binary data
    FILE* output = fopen("output.bin", "wb");
x_new.update_host();
fwrite(x_new.host_data(), sizeof(double), nx * ny, output);
fclose(output);
// meta data
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << options.nx << " " << options.ny << " 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl;
    // print table summarizing results
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "simulation took " << timespent << " seconds" << std::endl;
std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of "
<< float(iters_cg)/timespent << " iters/second" << std::endl;
std::cout << iters_newton << " newton iterations" << std::endl;
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "Goodbye!" << std::endl;
return 0;
}
|
aaee8d94efdd8f69c61cface47254128543794ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "graph.hpp"
#include "matrix_graph.hpp"
#include "sparse_graph.hpp"
#include "list_graph.hpp"
#include "thrust_prim.hpp"
#include "cuda1_prim.hpp"
#include "cuda2_prim.hpp"
#include "generator.hpp"
#include "cpu_prim.hpp"
#include <chrono>
#include <iostream>
#ifdef WITH_BOOST
#include <boost/graph/prim_minimum_spanning_tree.hpp>
#endif
using namespace std::chrono;
double cuda1Runtime(const Graph& g, int cntRuns, Graph& mst) {
steady_clock::time_point begin, end;
double runtime;
const uint32_t V = g.num_vertices();
// Each edge is present twice: once from each vertex
const uint32_t E = g.num_edges();
// Inputs
uint2 *outbound_vertices = new uint2[V];
uint2 *inbound_vertices = new uint2[E*2];
// Outputs
uint32_t *outbound = new uint32_t[V];
uint32_t *inbound = new uint32_t[V];
uint32_t *weights = new uint32_t[V];
// Prepare input data
cuda1Setup(g, inbound_vertices, outbound_vertices);
// initialize solution arrays with +inf
std::fill(outbound, outbound + V, UINT32_MAX);
std::fill(inbound, inbound + V, UINT32_MAX);
std::fill(weights, weights + V, UINT32_MAX);
// allow for warm-up
cuda1PrimAlgorithm(V, E, outbound_vertices, inbound_vertices, outbound, inbound, weights);
// now the real test run
begin = steady_clock::now();
for (int i = 0; i < cntRuns; ++i) {
// initialize solution arrays with +inf
std::fill(outbound, outbound + V, UINT32_MAX);
std::fill(inbound, inbound + V, UINT32_MAX);
std::fill(weights, weights + V, UINT32_MAX);
// find MST solution
cuda1PrimAlgorithm(V, E, outbound_vertices, inbound_vertices, outbound, inbound, weights);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end - begin)).count();
mst.resize(V, V - 1, g.is_directed());
// remove invalid edge
for (uint32_t i = 0; i < V; ++i) {
if ((uint32_t)inbound[i] <= V) {
mst.set(outbound[i], inbound[i], (uint32_t)weights[i]);
}
}
delete[] outbound_vertices;
delete[] inbound_vertices;
delete[] outbound;
delete[] inbound;
delete[] weights;
    // return as milliseconds per round
return 1000.*runtime / cntRuns;
}
double cuda2Runtime(const Graph& g, int cntRuns, Graph& mst,
bool pinned=false, bool zerocopy=false) {
steady_clock::time_point begin, end;
double runtime;
const uint32_t V = g.num_vertices();
// Each edge is present twice: once from each vertex
const uint32_t E = 2*g.num_edges();
// Inputs
uint2 *vertices, *edges;
// Outputs
uint32_t *outbound, *inbound, *weights;
//
// Allocate the inputs and outputs depending on the memory strategy we want
// to evaluate:
//
// * pinned == false, zerocopy == false
// -> regular memory
//
    //   * pinned == true, zerocopy == false
// -> pin host-allocated data, but do nothing for device-allocated data
//
// * pinned == true, zerocopy == true
// -> Allocate everything on the host, use device pointers
//
if (!pinned) {
vertices = new uint2[V];
edges = new uint2[E];
outbound = new uint32_t[V-1];
inbound = new uint32_t[V-1];
weights = new uint32_t[V-1];
} else {
if (!zerocopy) {
hipHostMalloc((uint2 **) &vertices, V * sizeof(uint2));
hipHostMalloc((uint2 **) &edges, E * sizeof(uint2));
hipHostMalloc((uint32_t **) &outbound, (V-1) * sizeof(uint32_t));
hipHostMalloc((uint32_t **) &inbound, (V-1) * sizeof(uint32_t));
hipHostMalloc((uint32_t **) &weights, (V-1) * sizeof(uint32_t));
} else {
hipHostMalloc((uint2 **) &vertices, V * sizeof(uint2), hipHostMallocMapped);
hipHostMalloc((uint2 **) &edges, E * sizeof(uint2), hipHostMallocMapped);
hipHostMalloc((uint32_t **) &outbound, (V-1) * sizeof(uint32_t), hipHostMallocMapped);
hipHostMalloc((uint32_t **) &inbound, (V-1) * sizeof(uint32_t), hipHostMallocMapped);
hipHostMalloc((uint32_t **) &weights, (V-1) * sizeof(uint32_t), hipHostMallocMapped);
}
}
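    // Sketch (not part of the original benchmark): with hipHostMallocMapped the
    // kernel-visible alias of a mapped host buffer would be obtained like this;
    // cuda2PrimAlgorithm() is assumed to do the equivalent internally when its
    // zerocopy flag is set.
    //
    //     uint2 *vertices_dev = nullptr;
    //     hipHostGetDevicePointer((void **)&vertices_dev, vertices, 0);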
// Prepare input data
cuda2Setup(g, vertices, edges);
// allow for warm-up
cuda2PrimAlgorithm(vertices, V, edges, E, outbound, inbound, weights, zerocopy);
// now the real test run
begin = steady_clock::now();
for (int i=0; i<cntRuns; ++i) {
// find MST solution
cuda2PrimAlgorithm(vertices, V, edges, E, outbound, inbound, weights, zerocopy);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end-begin)).count();
mst.resize(V, V-1, g.is_directed());
for (uint32_t i = 0; i < V-1; ++i) {
mst.set(outbound[i], inbound[i], (uint32_t) weights[i]);
}
if (!pinned) {
delete[] vertices;
delete[] edges;
delete[] outbound;
delete[] inbound;
delete[] weights;
} else {
hipHostFree(vertices);
hipHostFree(edges);
hipHostFree(outbound);
hipHostFree(inbound);
hipHostFree(weights);
}
    // return as milliseconds per round
return 1000.*runtime/cntRuns;
}
double thrustRuntime(const Graph& g, int cntRuns, Graph& mst) {
steady_clock::time_point begin, end;
double runtime;
// prepare data for thrust
const uint32_t V = g.num_vertices();
// Each edge is present twice: once from each vertex
const uint32_t E = g.num_edges();
thrust::host_vector<uint2> vertices(V);
thrust::host_vector<uint2> edges(2*E);
thrustSetup(g, vertices, edges);
thrust::host_vector<uint32_t> outbound(V);
thrust::host_vector<uint32_t> inbound(V);
thrust::host_vector<uint32_t> weights(V);
// allow for warm-up
thrustPrimAlgorithm(vertices, edges, outbound, inbound, weights, V, E);
// now the real test run
begin = steady_clock::now();
for (int i=0; i<cntRuns; ++i) {
// find MST solution
thrustPrimAlgorithm(vertices, edges, outbound, inbound, weights, V, E);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end-begin)).count();
// Store the results in mst
mst.resize(V, V-1, g.is_directed());
for (uint32_t i = 0; i < V-1; ++i) {
mst.set(outbound[i], inbound[i], (uint32_t) weights[i]);
}
    // return as milliseconds per round
return 1000.*runtime/cntRuns;
}
template <class T_GRAPH>
double cpuRuntime(const Graph& g, int cntRuns, Graph& mst) {
steady_clock::time_point begin, end;
double runtime;
// allow for warm-up, store the result
cpuPrimAlgorithm(g, mst);
// now the real test run
begin = steady_clock::now();
for (int i=0; i<cntRuns; ++i) {
MatrixGraph mst2;
// find MST solution
cpuPrimAlgorithm(g, mst2);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end-begin)).count();
    // return as milliseconds per round
return 1000.*runtime/cntRuns;
}
#ifdef WITH_BOOST
struct do_nothing_dijkstra_visitor : boost::default_dijkstra_visitor {};
double boostRuntime(const Graph& g, int cntRuns, Graph& mst) {
steady_clock::time_point begin, end;
BoostGraph boost_g;
double runtime;
// allow for warm-up
g.toBoost(boost_g);
auto p = std::vector<boost::graph_traits<BoostGraph>::vertex_descriptor >(g.num_vertices());
boost::prim_minimum_spanning_tree(boost_g, &p[0]);
begin = steady_clock::now();
for (int i=0; i<cntRuns; ++i) {
boost::prim_minimum_spanning_tree(boost_g, &p[0]);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end-begin)).count();
// store the result
mst.resize(g.num_vertices(), g.num_vertices()-1, g.is_directed());
for (std::size_t i = 0; i != p.size(); ++i) {
if (p[i] != i) {
mst.set(i, p[i], g(i, p[i]));
}
}
    // return as milliseconds per round
return 1000.*runtime/cntRuns;
}
#endif
void runParamSet(std::ostream& os, int num_vertices, int weight_range, float density,
int numReplica, int cntRuns, uint64_t seed) {
for (int i=0; i<numReplica; ++i) {
// create an undirected graph, using a different seed in each replica
MatrixGraph g;
uint64_t itseed = seed+i;
generator(g, num_vertices, 0, weight_range, density, false, itseed);
// run through all implementations and get runtime
double runtime;
/*ListGraph cpu_l_mst;
runtime = cpuRuntime<ListGraph>(g, cntRuns, cpu_l_mst);
// output to file
os << "cpu_l," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cpu_l_mst.sum_weights()
<< std::endl;
*/
#ifdef WITH_BOOST
// run through boost implementation
ListGraph boost_mst;
runtime = boostRuntime(g, cntRuns, boost_mst);
// output to file
os << "boost," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << boost_mst.sum_weights()
<< std::endl;
#endif
/* */
// run through thrust implementation
ListGraph thrust_mst;
runtime = thrustRuntime(g, cntRuns, thrust_mst);
// output to file
os << "thrust," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << thrust_mst.sum_weights()
<< std::endl;
/* */
// run through CUDA implementation #1
ListGraph cuda1_mst;
runtime = cuda1Runtime(g, cntRuns, cuda1_mst);
// output to file
os << "cuda1," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cuda1_mst.sum_weights()
<< std::endl;
// run through CUDA implementation #2 - regular
ListGraph cuda2_mst;
runtime = cuda2Runtime(g, cntRuns, cuda2_mst, false, false);
// output to file
os << "cuda2," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cuda2_mst.sum_weights()
<< std::endl;
// run through CUDA implementation #2 - pinned memory
ListGraph cuda2_mst_pinned;
runtime = cuda2Runtime(g, cntRuns, cuda2_mst_pinned, true, false);
// output to file
os << "cuda2-pinned," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cuda2_mst_pinned.sum_weights()
<< std::endl;
        // run through CUDA implementation #2 - zero-copy memory
ListGraph cuda2_mst_zero;
runtime = cuda2Runtime(g, cntRuns, cuda2_mst_zero, true, true);
// output to file
os << "cuda2-zero," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cuda2_mst_zero.sum_weights()
<< std::endl;
}
}
int main(int argc, char* argv[]) {
std::cout << "implementation,run,seed,vertices,density,weight_range,runtime,min" << std::endl;
//
// Test batch: Effects of Density (constant node size)
//
runParamSet(std::cout, 4096, 5000, 0.01, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.05, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.3, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.5, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.7, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 1.0, 3, 1, 42);
// Test batch: Effects of Vertex count (constant density)
/*
runParamSet(std::cout, 10, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 50, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 100, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 500, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 1000 , 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 5000 , 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 10000, 5000, 0.1, 3, 1, 42);
*/
// Test batch: CUDA1 vs CUDA2
/*
runParamSet(std::cout, 4095, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 4097, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 16383, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 16384, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 16385, 5000, 0.1, 3, 1, 42);
*/
}
|
aaee8d94efdd8f69c61cface47254128543794ac.cu
|
#include "graph.hpp"
#include "matrix_graph.hpp"
#include "sparse_graph.hpp"
#include "list_graph.hpp"
#include "thrust_prim.hpp"
#include "cuda1_prim.hpp"
#include "cuda2_prim.hpp"
#include "generator.hpp"
#include "cpu_prim.hpp"
#include <chrono>
#include <iostream>
#ifdef WITH_BOOST
#include <boost/graph/prim_minimum_spanning_tree.hpp>
#endif
using namespace std::chrono;
double cuda1Runtime(const Graph& g, int cntRuns, Graph& mst) {
steady_clock::time_point begin, end;
double runtime;
const uint32_t V = g.num_vertices();
// Each edge is present twice: once from each vertex
const uint32_t E = g.num_edges();
// Inputs
uint2 *outbound_vertices = new uint2[V];
uint2 *inbound_vertices = new uint2[E*2];
// Outputs
uint32_t *outbound = new uint32_t[V];
uint32_t *inbound = new uint32_t[V];
uint32_t *weights = new uint32_t[V];
// Prepare input data
cuda1Setup(g, inbound_vertices, outbound_vertices);
// initialize solution arrays with +inf
std::fill(outbound, outbound + V, UINT32_MAX);
std::fill(inbound, inbound + V, UINT32_MAX);
std::fill(weights, weights + V, UINT32_MAX);
// allow for warm-up
cuda1PrimAlgorithm(V, E, outbound_vertices, inbound_vertices, outbound, inbound, weights);
// now the real test run
begin = steady_clock::now();
for (int i = 0; i < cntRuns; ++i) {
// initialize solution arrays with +inf
std::fill(outbound, outbound + V, UINT32_MAX);
std::fill(inbound, inbound + V, UINT32_MAX);
std::fill(weights, weights + V, UINT32_MAX);
// find MST solution
cuda1PrimAlgorithm(V, E, outbound_vertices, inbound_vertices, outbound, inbound, weights);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end - begin)).count();
mst.resize(V, V - 1, g.is_directed());
// remove invalid edge
for (uint32_t i = 0; i < V; ++i) {
if ((uint32_t)inbound[i] <= V) {
mst.set(outbound[i], inbound[i], (uint32_t)weights[i]);
}
}
delete[] outbound_vertices;
delete[] inbound_vertices;
delete[] outbound;
delete[] inbound;
delete[] weights;
    // return as milliseconds per round
return 1000.*runtime / cntRuns;
}
double cuda2Runtime(const Graph& g, int cntRuns, Graph& mst,
bool pinned=false, bool zerocopy=false) {
steady_clock::time_point begin, end;
double runtime;
const uint32_t V = g.num_vertices();
// Each edge is present twice: once from each vertex
const uint32_t E = 2*g.num_edges();
// Inputs
uint2 *vertices, *edges;
// Outputs
uint32_t *outbound, *inbound, *weights;
//
// Allocate the inputs and outputs depending on the memory strategy we want
// to evaluate:
//
// * pinned == false, zerocopy == false
// -> regular memory
//
    //   * pinned == true, zerocopy == false
// -> pin host-allocated data, but do nothing for device-allocated data
//
// * pinned == true, zerocopy == true
// -> Allocate everything on the host, use device pointers
//
if (!pinned) {
vertices = new uint2[V];
edges = new uint2[E];
outbound = new uint32_t[V-1];
inbound = new uint32_t[V-1];
weights = new uint32_t[V-1];
} else {
if (!zerocopy) {
cudaMallocHost((uint2 **) &vertices, V * sizeof(uint2));
cudaMallocHost((uint2 **) &edges, E * sizeof(uint2));
cudaMallocHost((uint32_t **) &outbound, (V-1) * sizeof(uint32_t));
cudaMallocHost((uint32_t **) &inbound, (V-1) * sizeof(uint32_t));
cudaMallocHost((uint32_t **) &weights, (V-1) * sizeof(uint32_t));
} else {
cudaHostAlloc((uint2 **) &vertices, V * sizeof(uint2), cudaHostAllocMapped);
cudaHostAlloc((uint2 **) &edges, E * sizeof(uint2), cudaHostAllocMapped);
cudaHostAlloc((uint32_t **) &outbound, (V-1) * sizeof(uint32_t), cudaHostAllocMapped);
cudaHostAlloc((uint32_t **) &inbound, (V-1) * sizeof(uint32_t), cudaHostAllocMapped);
cudaHostAlloc((uint32_t **) &weights, (V-1) * sizeof(uint32_t), cudaHostAllocMapped);
}
}
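    // Sketch (not part of the original benchmark): with cudaHostAllocMapped the
    // kernel-visible alias of a mapped host buffer would be obtained like this;
    // cuda2PrimAlgorithm() is assumed to do the equivalent internally when its
    // zerocopy flag is set.
    //
    //     uint2 *vertices_dev = nullptr;
    //     cudaHostGetDevicePointer((void **)&vertices_dev, vertices, 0);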
// Prepare input data
cuda2Setup(g, vertices, edges);
// allow for warm-up
cuda2PrimAlgorithm(vertices, V, edges, E, outbound, inbound, weights, zerocopy);
// now the real test run
begin = steady_clock::now();
for (int i=0; i<cntRuns; ++i) {
// find MST solution
cuda2PrimAlgorithm(vertices, V, edges, E, outbound, inbound, weights, zerocopy);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end-begin)).count();
mst.resize(V, V-1, g.is_directed());
for (uint32_t i = 0; i < V-1; ++i) {
mst.set(outbound[i], inbound[i], (uint32_t) weights[i]);
}
if (!pinned) {
delete[] vertices;
delete[] edges;
delete[] outbound;
delete[] inbound;
delete[] weights;
} else {
cudaFreeHost(vertices);
cudaFreeHost(edges);
cudaFreeHost(outbound);
cudaFreeHost(inbound);
cudaFreeHost(weights);
}
    // return as milliseconds per round
return 1000.*runtime/cntRuns;
}
double thrustRuntime(const Graph& g, int cntRuns, Graph& mst) {
steady_clock::time_point begin, end;
double runtime;
// prepare data for thrust
const uint32_t V = g.num_vertices();
// Each edge is present twice: once from each vertex
const uint32_t E = g.num_edges();
thrust::host_vector<uint2> vertices(V);
thrust::host_vector<uint2> edges(2*E);
thrustSetup(g, vertices, edges);
thrust::host_vector<uint32_t> outbound(V);
thrust::host_vector<uint32_t> inbound(V);
thrust::host_vector<uint32_t> weights(V);
// allow for warm-up
thrustPrimAlgorithm(vertices, edges, outbound, inbound, weights, V, E);
// now the real test run
begin = steady_clock::now();
for (int i=0; i<cntRuns; ++i) {
// find MST solution
thrustPrimAlgorithm(vertices, edges, outbound, inbound, weights, V, E);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end-begin)).count();
// Store the results in mst
mst.resize(V, V-1, g.is_directed());
for (uint32_t i = 0; i < V-1; ++i) {
mst.set(outbound[i], inbound[i], (uint32_t) weights[i]);
}
    // return as milliseconds per round
return 1000.*runtime/cntRuns;
}
template <class T_GRAPH>
double cpuRuntime(const Graph& g, int cntRuns, Graph& mst) {
steady_clock::time_point begin, end;
double runtime;
// allow for warm-up, store the result
cpuPrimAlgorithm(g, mst);
// now the real test run
begin = steady_clock::now();
for (int i=0; i<cntRuns; ++i) {
MatrixGraph mst2;
// find MST solution
cpuPrimAlgorithm(g, mst2);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end-begin)).count();
    // return as milliseconds per round
return 1000.*runtime/cntRuns;
}
#ifdef WITH_BOOST
struct do_nothing_dijkstra_visitor : boost::default_dijkstra_visitor {};
double boostRuntime(const Graph& g, int cntRuns, Graph& mst) {
steady_clock::time_point begin, end;
BoostGraph boost_g;
double runtime;
// allow for warm-up
g.toBoost(boost_g);
auto p = std::vector<boost::graph_traits<BoostGraph>::vertex_descriptor >(g.num_vertices());
boost::prim_minimum_spanning_tree(boost_g, &p[0]);
begin = steady_clock::now();
for (int i=0; i<cntRuns; ++i) {
boost::prim_minimum_spanning_tree(boost_g, &p[0]);
}
end = steady_clock::now();
runtime = (duration_cast<duration<double>>(end-begin)).count();
// store the result
mst.resize(g.num_vertices(), g.num_vertices()-1, g.is_directed());
for (std::size_t i = 0; i != p.size(); ++i) {
if (p[i] != i) {
mst.set(i, p[i], g(i, p[i]));
}
}
    // return as milliseconds per round
return 1000.*runtime/cntRuns;
}
#endif
void runParamSet(std::ostream& os, int num_vertices, int weight_range, float density,
int numReplica, int cntRuns, uint64_t seed) {
for (int i=0; i<numReplica; ++i) {
// create an undirected graph, using a different seed in each replica
MatrixGraph g;
uint64_t itseed = seed+i;
generator(g, num_vertices, 0, weight_range, density, false, itseed);
// run through all implementations and get runtime
double runtime;
/*ListGraph cpu_l_mst;
runtime = cpuRuntime<ListGraph>(g, cntRuns, cpu_l_mst);
// output to file
os << "cpu_l," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cpu_l_mst.sum_weights()
<< std::endl;
*/
#ifdef WITH_BOOST
// run through boost implementation
ListGraph boost_mst;
runtime = boostRuntime(g, cntRuns, boost_mst);
// output to file
os << "boost," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << boost_mst.sum_weights()
<< std::endl;
#endif
/* */
// run through thrust implementation
ListGraph thrust_mst;
runtime = thrustRuntime(g, cntRuns, thrust_mst);
// output to file
os << "thrust," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << thrust_mst.sum_weights()
<< std::endl;
/* */
// run through CUDA implementation #1
ListGraph cuda1_mst;
runtime = cuda1Runtime(g, cntRuns, cuda1_mst);
// output to file
os << "cuda1," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cuda1_mst.sum_weights()
<< std::endl;
// run through CUDA implementation #2 - regular
ListGraph cuda2_mst;
runtime = cuda2Runtime(g, cntRuns, cuda2_mst, false, false);
// output to file
os << "cuda2," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cuda2_mst.sum_weights()
<< std::endl;
// run through CUDA implementation #2 - pinned memory
ListGraph cuda2_mst_pinned;
runtime = cuda2Runtime(g, cntRuns, cuda2_mst_pinned, true, false);
// output to file
os << "cuda2-pinned," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cuda2_mst_pinned.sum_weights()
<< std::endl;
        // run through CUDA implementation #2 - zero-copy memory
ListGraph cuda2_mst_zero;
runtime = cuda2Runtime(g, cntRuns, cuda2_mst_zero, true, true);
// output to file
os << "cuda2-zero," << i
<< "," << itseed
<< "," << num_vertices
<< "," << density
<< "," << weight_range
<< "," << runtime
<< "," << cuda2_mst_zero.sum_weights()
<< std::endl;
}
}
int main(int argc, char* argv[]) {
std::cout << "implementation,run,seed,vertices,density,weight_range,runtime,min" << std::endl;
//
// Test batch: Effects of Density (constant node size)
//
runParamSet(std::cout, 4096, 5000, 0.01, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.05, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.3, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.5, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.7, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 1.0, 3, 1, 42);
// Test batch: Effects of Vertex count (constant density)
/*
runParamSet(std::cout, 10, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 50, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 100, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 500, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 1000 , 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 5000 , 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 10000, 5000, 0.1, 3, 1, 42);
*/
// Test batch: CUDA1 vs CUDA2
/*
runParamSet(std::cout, 4095, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 4096, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 4097, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 16383, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 16384, 5000, 0.1, 3, 1, 42);
runParamSet(std::cout, 16385, 5000, 0.1, 3, 1, 42);
*/
}
|
9f1af01723d27095592eae1cb1a8898a02b18e69.hip
|
// !!! This is a file automatically generated by hipify!!!
// This version: c04: 6.45 seconds.
/*
This version is "NO Streaming" version.
0102 TODOs;
(V) 1. Correctness
// c01~07, p31~36
// c06 after: TOO SLOW to get an answer
(V) 2. Larger Blocking_Factor B
( ) 3. Initial padding (Remove if(i<n && j<n))
( ) 4. Asynchronous Peer Copy
( ) 5. Use different streams in hipMemcpyPeerAsync! (stream 0~3)
( ) 6. Less hipDeviceSynchronize();
( ) 7. #pragma omp parallel
(V) N. Hide printf into #ifdef DEBUG_PHASE1, PHASE2, PHASE3
*/
// System includes
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <omp.h>
#include <time.h>
#define TIME
// #define CUDA_NVPROF
// #define DEBUG_DIST
// #define DEBUG_DEVICE_DIST
// #define DEBUG_DEVICE_DIST1
// #define DEBUG_PHASE1
// #define DEBUG_PHASE2
// #define DEBUG_PHASE3
// #define CHECK_CORRECTNESS
const int BLOCKING_FACTOR = 32; // 32, 16, 8, 4, 2
const int INF = ((1 << 30) - 1);
// Global var stored in Data Section.
// const int V = 40010;
void input(char* inFileName);
void output(char* outFileName);
void print_ans(int num_V, char* ans_file);
void print_Dist(int num_V);
void block_FW(int B);
// void block_FW_small_n(int B);
void block_FW_MultiGPU_Old(int B);
void block_FW_MultiGPU(int B);
int ceil(int a, int b); // min num that >= a/b
// floor: max num <= a/b
int floor(int a, int b);
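// Addr(): linear offset of element (i, j) inside the matrixIdx-th N*N tile;
// used both for the shared-memory tiles (N = B) and the full distance matrix (N = n).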
__device__ inline int Addr(int matrixIdx, int i, int j, int N){
return( N*N*matrixIdx + i*N + j);
}
// W: width, H: height
// __device__ inline int Addr2(int matrixIdx, int i, int j, int W, int H){
// return( W*H*matrixIdx + i*W + j);
// }
// Device_Boundary: the row index (i direction) at which the matrix is initially split between the two GPUs.
// PHASE 1 : ONE block does k iterations with B*B threads.
// __global__ void cal(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){
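// Blocked Floyd-Warshall, round r (pivot block (r, r)):
//   Phase 1 (cal / cal_1)  : update the pivot block itself.
//   Phase 2 (cal3 / cal3_1): update the blocks in the pivot row and pivot column.
//   Phase 3 (cal3 / cal3_1): update all remaining blocks.
// The *_1 variants differ only in their guard, which keeps device 1 inside its
// half of the rows (i >= device_Boundary).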
__global__ void cal(int device_Boundary, int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
int i = block_start_x*B + threadIdx.y;
int j = block_start_y*B + threadIdx.x;
if(i<device_Boundary && j<n){
// S[ (i%B)*B + (j%B) ] = device_Dist[i*n + j];
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
// S[Addr(0, (i%B), (j%B), B)] = device_Dist[Addr(0,i,j,n)];
// S[ (i%B)*(B+1) + (j%(B+1)) ] = device_Dist[i*n + j];
// __syncthreads();
// This for-loop CANNOT be parallelized!
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
for (int iter = 0; iter<B && Round*B+iter <n; iter++){
__syncthreads();
// if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)]);
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}// end if(i<n && j<n )
}
// phase 1 for device 1
__global__ void cal_1(int device_Boundary, int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
int i = block_start_x*B + threadIdx.y;
int j = block_start_y*B + threadIdx.x;
if(i >= device_Boundary && i<n && j<n){
// S[ (i%B)*B + (j%B) ] = device_Dist[i*n + j];
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
// S[Addr(0, (i%B), (j%B), B)] = device_Dist[Addr(0,i,j,n)];
// S[ (i%B)*(B+1) + (j%(B+1)) ] = device_Dist[i*n + j];
// __syncthreads();
// This for-loop CANNOT be parallelized!
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
for (int iter = 0; iter<B && Round*B+iter <n; iter++){
__syncthreads();
// if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)]);
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}// end if(i<n && j<n )
}
// __global__ void cal3(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){
__global__ void cal3(int device_Boundary, int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
// int i = block_start_y* B + blockIdx.y * B + threadIdx.y;
// int j = block_start_x* B + blockIdx.x * B + threadIdx.x;
int i = block_start_x* B + blockIdx.x * B + threadIdx.y;
int j = block_start_y* B + blockIdx.y * B + threadIdx.x;
// S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
// S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i<device_Boundary && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
if(j<n && (Round*B + threadIdx.y)<n) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i<device_Boundary && j<n){
// For each thread, calculate one edge.
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
__syncthreads();
// This for-loop CANNOT be parallelized!
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
/// KEY!! Don't USE % on K.
for (int iter = 0; iter<B && Round*B+iter <n; iter++){ //k = Round * B; k < (Round + 1) * B && k < n; ++k) {
// __syncthreads();
// if (S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)] < S[Addr(0, (i%B), (j%B), B)] ) {
// S[Addr(0, (i%B), (j%B), B)] = S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)];
// }
// i , k // k , j // i , j
// if (S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)] );
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}
}
// Phase 3 for device 1.
__global__ void cal3_1(int device_Boundary, int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
int i = block_start_x* B + blockIdx.x * B + threadIdx.y;
int j = block_start_y* B + blockIdx.y * B + threadIdx.x;
if( i<n && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
if((Round*B + threadIdx.y)<n && j<n ) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i>=device_Boundary && i<n && j<n){
// For each thread, calculate one edge.
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
__syncthreads();
for (int iter = 0; iter<B && Round*B+iter <n; iter++){ //k = Round * B; k < (Round + 1) * B && k < n; ++k) {
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)] );
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}
}
int MAX_GPU_COUNT = 32;
int n, m;
// static int Dist[V][V];
int* Dist;
int * Dist_1;
int main(int argc, char* argv[]) {
#ifdef TIME
// struct timespec start, end, temp;
struct timespec total_starttime;
struct timespec total_temp;
struct timespec start;
struct timespec end;
struct timespec temp;
double IO_time=0.0;
double Total_time = 0.0;
clock_gettime(CLOCK_MONOTONIC, &total_starttime);
clock_gettime(CLOCK_MONOTONIC, &start);
#endif
input(argv[1]);
#ifdef DEBUG_DEVICE_DIST
Dist_1 = (int*)malloc(sizeof(unsigned int)*n*n);
#endif
#ifdef TIME
clock_gettime(CLOCK_MONOTONIC, &end);
if ((end.tv_nsec - start.tv_nsec) < 0) {
temp.tv_sec = end.tv_sec-start.tv_sec-1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0;
#endif
// printf("%f second on input\n", time_used);
// we have num_v, num_e, adj_matrix (Dist[V][V]) now
// int B = 512;
// Note: Since B*B threads, maximum B : 32 (MAX 1024 threads per block)
int B;
B = BLOCKING_FACTOR;
if(n < B){
block_FW_MultiGPU_Old(B);
}
else{
block_FW_MultiGPU(B);
}
#ifdef TIME
clock_gettime(CLOCK_MONOTONIC, &start);
#endif
output(argv[2]);
#ifdef TIME
clock_gettime(CLOCK_MONOTONIC, &end);
// IO Time
if ((end.tv_nsec - start.tv_nsec) < 0) {
temp.tv_sec = end.tv_sec-start.tv_sec-1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
// Total Time
if ((end.tv_nsec - total_starttime.tv_nsec) < 0) {
total_temp.tv_sec = end.tv_sec-total_starttime.tv_sec-1;
total_temp.tv_nsec = 1000000000 + end.tv_nsec - total_starttime.tv_nsec;
} else {
total_temp.tv_sec = end.tv_sec - total_starttime.tv_sec;
total_temp.tv_nsec = end.tv_nsec - total_starttime.tv_nsec;
}
IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0;
Total_time = total_temp.tv_sec + (double) total_temp.tv_nsec / 1000000000.0;
#endif
#ifdef TIME
printf("IO Time: %.8f seconds\n", IO_time);
printf("Total Time: %.8f seconds\n",Total_time);
#endif
printf("========== Comparing results... ===========\n");
#ifdef DEBUG_DIST
print_Dist(n);
#endif
#ifdef CHECK_CORRECTNESS
print_ans(n, argv[3]);
#endif
printf("Job Finished\n");
return 0;
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file); // n = num_vertices
fread(&m, sizeof(int), 1, file); // m = num_edges
printf("V: %d, E: %d\n",n,m);
Dist = (int*) malloc(sizeof(int)*n*n);
// Initialize adjacency matrix
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) {
Dist[i*n+j] = 0;
// Dist[i][j] = 0;
} else {
Dist[i*n+j] = INF;
// Dist[i][j] = INF;
}
}
}
// Sequentially read input edges and fill them into adj matrix.
int pair[3];
for (int i = 0; i < m; ++i) {
fread(pair, sizeof(int), 3, file);
// Dist[pair[0]][pair[1]] = pair[2];
Dist[ pair[0]*n+ pair[1]] = pair[2];
}
fclose(file);
}
void print_ans(int num_V, char* ans_file){
bool wrong = false;
FILE* file = fopen(ans_file, "rb");
int* Ans = (int*)malloc(sizeof(int)*n*n);
fread(Ans, sizeof(int), n*n, file);
if(num_V > 15) num_V = 15;
for(int i=0; i<num_V*num_V; i++){
if(Dist[i] != Ans[i]){
wrong = true;
printf("Wrong at offset %d, expected %d but get %d\n", i*4, Ans[i], Dist[i]);
printf("Fron %d to %d , cost: %d\n", (i/n), (i%n), Ans[i] );
}
// printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]);
}
if(!wrong) printf(" ======= Congratulations! =========\n");
printf("======== Your Dist ==========\n");
for(int i=0;i<num_V; i++){
for(int j=0; j<num_V; j++){
if(j==num_V-1) printf("%d\n",Dist[i*num_V+j]);
else printf("%d ", Dist[i*num_V+j]);
}
// printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]);
}
printf("======== ANSWER ==========\n");
for(int i=0;i<num_V; i++){
for(int j=0; j<num_V; j++){
if(j==num_V-1) printf("%d\n",Ans[i*num_V+j]);
else printf("%d ", Ans[i*num_V+j]);
}
// printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]);
}
}
void print_Dist(int num_V){
printf("========= Dist ============\n");
for(int i=0;i<num_V; i++){
for(int j=0; j<num_V; j++){
if(j==num_V-1) printf("%d\n",Dist[i*num_V+j]);
else printf("%d ", Dist[i*num_V+j]);
}
// printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]);
}
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i*n+j] >= INF) Dist[i*n+j] = INF;
}
fwrite(Dist+i*n, sizeof(int), n, outfile);
}
fclose(outfile);
}
int ceil(int a, int b) { return (a + b - 1) / b; }
int floor(int a, int b){ return a/b >>1<<1; } // integer-divide, then clear the LSB of the quotient (rounds a/b down to an even number)
// 1204: Idea1 : one stream with 9 serialize kernel launch?
// memory to pass to GPU: B, r, r, r, 1, 1. ALL constant! No memory copy.
const int device_0 = 0;
const int device_1 = 1;
const int cudaEnablePeerAccess_Flags = 0;
#define NUM_THREAD 2
// For Large n.: Don't use Synchronize.
// n > 5000
void block_FW_MultiGPU(int B) {
printf("Large n : \n");
printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B);
printf(" %d * %d block\n",B,B);
int round = ceil(n, B);
// int cur_device_number;
int *device_Dist;
int *device_Dist_1;
int canGPU0AccessGPU1, canGPU1AccessGPU0;
int device_0_Boundary = ceil(n, 2); // e.g. 5/2 -> 3, 160/2 -> 80.
printf("ceil(%d, 2) :%d\n",n,device_0_Boundary);
printf("ceil % B remainder : %d\n",device_0_Boundary%B);
// Avoid cross pivot.
// 80 % 32 = 16. => (80 - 16 + 32) = 96.
// ceil(999,2) = 500, 500 % 32 = 20. 500 - 20 + 32 = 512.
if( device_0_Boundary%B !=0) device_0_Boundary = (device_0_Boundary- (device_0_Boundary%B) + B);
printf("device_0_Boundary: %d\n",device_0_Boundary);
// Record Computation time
#ifdef TIME
hipSetDevice(0);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
#endif
#ifdef TIME
float Total_comm_time = 0;
hipSetDevice(1);
hipEvent_t Commstart_device_1, Commstop_device_1;
hipEventCreate(&Commstart_device_1);
hipEventCreate(&Commstop_device_1);
hipSetDevice(0);
hipEvent_t Commstart, Commstop;
hipEventCreate(&Commstart);
hipEventCreate(&Commstop);
hipEventRecord(Commstart);
#endif
// Data Partition 1 : Split Top to device 0
// Bottom to device 1.
#pragma omp parallel num_threads(NUM_THREAD) //reduction(+:pixels)
{
int omp_id, omp_thread_num;
omp_id = omp_get_thread_num();
omp_thread_num = omp_get_num_threads();
if(omp_id==0){
hipSetDevice(0);
hipDeviceCanAccessPeer ( &canGPU0AccessGPU1, device_0, device_1 );
if(canGPU0AccessGPU1==1){
printf("Can 0 access 1? %d\n",canGPU0AccessGPU1);
hipDeviceEnablePeerAccess ( device_1, cudaEnablePeerAccess_Flags );
hipMalloc(&device_Dist, n * n* sizeof(unsigned int));
#ifdef TIME
hipEventRecord(Commstart);
#endif
hipMemcpyAsync(device_Dist, Dist, n* n*sizeof(unsigned int), hipMemcpyHostToDevice);
// hipMemcpyAsync(device_Dist, Dist, n*device_0_Boundary*sizeof(unsigned int), hipMemcpyHostToDevice);
printf("omp t%d allocate & copy gpu 0\n",omp_id);
}
else{
printf("Error, gpu 0 cannot directly access gpu 1\n");
// return 2;
}
}
else{
hipSetDevice(1);
hipDeviceCanAccessPeer ( &canGPU1AccessGPU0, device_1, device_0 );
if(canGPU1AccessGPU0==1){
printf("Can 1 access 0? %d\n",canGPU1AccessGPU0);
hipDeviceEnablePeerAccess ( device_0, cudaEnablePeerAccess_Flags );
// hipGetDevice(&cur_device_number);
hipMalloc(&device_Dist_1, n * n* sizeof(unsigned int));
hipMemcpyAsync(device_Dist_1, Dist, n* n*sizeof(unsigned int), hipMemcpyHostToDevice);
// hipMemcpyAsync(device_Dist_1+device_0_Boundary*n, Dist+device_0_Boundary*n, ( n*n -n*device_0_Boundary)*sizeof(unsigned int), hipMemcpyHostToDevice);
printf("omp t%d allocate & copy gpu 1\n",omp_id);
}
else{
printf("Error, gpu 1 cannot directly access gpu 0\n");
// return 2;
}
}
}
#ifdef TIME
float Commtime;
hipSetDevice(0);
hipEventRecord(Commstop);
hipEventSynchronize(Commstop); // WAIT until 'stop' complete.
hipEventElapsedTime(&Commtime, Commstart, Commstop);
printf("H2D copy took %.8f seconds\n",Commtime/1000);
Total_comm_time += Commtime;
#endif
#ifdef DEBUG_DEVICE_DIST
printf("========== Initial Condition =========\n");
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("Initial, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
#ifdef DEBUG_DEVICE_DIST1
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("Initial, device_Dist_1: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
dim3 num_threads(B,B);
for (int r = 0; r < round; ++r) {
#ifdef DEBUG_DIST
printf("========== Round %d ================\n",r);
// print_Dist(n);
#endif
// printf("%d %d\n", r, round);
fflush(stdout);
/* Phase 1*/
if(r*B < device_0_Boundary) { // Device 0 do pivot
// printf("Pivot at GPU 0!\n");
hipSetDevice(0);
hipLaunchKernelGGL(( cal), dim3(1), dim3(num_threads) , sizeof(int)*B*B, 0, device_0_Boundary, device_Dist, n, B, r, r, r);
#ifdef TIME
hipEventRecord(Commstart);
#endif
// Copy WHOLE pivot ROW to the other device.
for(int i= r*B; i<(r+1)*B && i<n; i++)
hipMemcpyPeer(device_Dist_1+i*n,1, device_Dist+i*n,0 , n*sizeof(unsigned int));
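// Note: this issues one blocking hipMemcpyPeer per row of the pivot block (up to B copies);
// TODO items 4/5 above suggest replacing these with hipMemcpyPeerAsync on separate streams.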
#ifdef TIME
float Commtime_Phase1;
hipEventRecord(Commstop);
hipEventSynchronize(Commstop); // WAIT until 'stop' complete.
hipEventElapsedTime(&Commtime_Phase1, Commstart, Commstop);
// printf("Phase1 mem copy took %.8f seconds\n",Commtime_Phase1/1000);
Total_comm_time += Commtime_Phase1;
#endif
}
else{ // Device 1 do then copy to the other.
hipSetDevice(1);
// printf("Pivot at GPU 1!\n");
hipLaunchKernelGGL(( cal_1), dim3(1), dim3(num_threads) , sizeof(int)*B*B, 0, device_0_Boundary, device_Dist_1, n, B, r, r, r);
// Copy pivot ROW to the other device.
#ifdef TIME
hipEventRecord(Commstart_device_1);
#endif
for(int i= r*B; i<(r+1)*B && i<n; i++)
hipMemcpyPeer(device_Dist+i*n,0, device_Dist_1+i*n,1 , n*sizeof(unsigned int));
#ifdef TIME
float Commtime_Phase1;
hipEventRecord(Commstop_device_1);
hipEventSynchronize(Commstop_device_1); // WAIT until 'stop' complete.
hipEventElapsedTime(&Commtime_Phase1, Commstart_device_1, Commstop_device_1);
// printf("Phase1 mem copy took %.8f seconds\n",Commtime_Phase1/1000);
Total_comm_time += Commtime_Phase1;
#endif
}
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After phase1, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
#ifdef DEBUG_DEVICE_DIST1
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After phase1, device_Dist_1: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
/* ----------- Phase 2 ------------- */
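// Each GPU only updates the pivot-row/pivot-column blocks that fall in its own
// half of the rows: cal3 writes rows i < device_0_Boundary (device 0), while
// cal3_1 writes rows i >= device_0_Boundary (device 1).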
hipSetDevice(0);
// Compute four sub-phase
if(r*B < device_0_Boundary){
// TODO : Modify cal() and cal3() : Need to pass boundary into!!
// 2-1
if(r !=0){
dim3 nB(1,r);
hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_0_Boundary, device_Dist, n, B, r, r, 0);
}
// 2-2
if(round -r-1 !=0){
dim3 nB(1,round - r - 1);
hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_0_Boundary, device_Dist, n, B, r, r, r + 1);
}
//2-3
if(r!=0){
dim3 nB(r,1);
hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_0_Boundary, device_Dist, n, B, r, 0, r);
}
// 2-4
if(round-r-1 !=0) {
dim3 nB(round - r - 1,1);
hipLaunchKernelGGL(( cal3), dim3(nB) , dim3(num_threads), sizeof(int)*B*B*3 , 0, device_0_Boundary, device_Dist, n, B, r, r + 1, r);
}
}
// Compute ONLY 2-3
else{
//2-3
if(r!=0){
dim3 nB(r,1);
hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_0_Boundary, device_Dist, n, B, r, 0, r);
}
}
hipSetDevice(1);
// Compute ONLY 2-4
if(r*B < device_0_Boundary){
// 2-4
if(round-r-1 !=0) {
dim3 nB(round - r - 1,1);
hipLaunchKernelGGL(( cal3_1), dim3(nB) , dim3(num_threads), sizeof(int)*B*B*3 , 0, device_0_Boundary, device_Dist_1, n, B, r, r + 1, r);
}
}
// Compute four sub-phase
else{
// 2-1
if(r !=0){
dim3 nB(1,r);
hipLaunchKernelGGL(( cal3_1), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_0_Boundary, device_Dist_1, n, B, r, r, 0);
}
// 2-2
if(round -r-1 !=0){
dim3 nB(1,round - r - 1);
hipLaunchKernelGGL(( cal3_1), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_0_Boundary, device_Dist_1, n, B, r, r, r + 1);
}
//2-3
if(r!=0){
dim3 nB(r,1);
hipLaunchKernelGGL(( cal3_1), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_0_Boundary, device_Dist_1, n, B, r, 0, r);
}
// 2-4
if(round-r-1 !=0) {
dim3 nB(round - r - 1,1);
hipLaunchKernelGGL(( cal3_1), dim3(nB) , dim3(num_threads), sizeof(int)*B*B*3 , 0, device_0_Boundary, device_Dist_1, n, B, r, r + 1, r);
}
}
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After PHASE 2, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
#ifdef DEBUG_DEVICE_DIST1
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After PHASE 2, device_Dist_1: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
/* ----------- Phase 3 ------------- */
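// Phase 3 launches cover all remaining block rows/columns on both GPUs; the
// guards inside cal3 / cal3_1 again restrict each GPU's writes to its own half
// of the distance matrix.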
hipSetDevice(0);
if(r != 0){
dim3 nB(r,r);
//hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, 0, r, r);
hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_0_Boundary, device_Dist, n, B, r, 0, 0);
}
if(r !=0 && (round-r-1) !=0){
dim3 nB(r,(round-r-1));
//hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, r + 1, round - r - 1, r);
hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_0_Boundary,device_Dist, n, B, r, 0, r + 1);
}
if(r !=0 && round-r-1 !=0){
dim3 nB((round-r-1),r);
//hipLaunchKernelGGL(( cal3), dim3(nB) ,dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, 0, r, round - r - 1);
hipLaunchKernelGGL(( cal3), dim3(nB) ,dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_0_Boundary,device_Dist, n, B, r, r + 1, 0);
}
if(round-r-1 !=0){
dim3 nB_p3(round - r - 1, round - r - 1);
//hipLaunchKernelGGL(( cal3), dim3(nB_p3), dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1);
hipLaunchKernelGGL(( cal3), dim3(nB_p3), dim3(num_threads), sizeof(int)*B*B*3 , 0, device_0_Boundary,device_Dist, n, B, r, r + 1, r + 1);
}
hipSetDevice(1);
if(r != 0){
dim3 nB(r,r);
//hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, 0, r, r);
hipLaunchKernelGGL(( cal3_1), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_0_Boundary,device_Dist_1, n, B, r, 0, 0);
}
if(r !=0 && (round-r-1) !=0){
dim3 nB(r,(round-r-1));
//hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, 0, r + 1, round - r - 1, r);
hipLaunchKernelGGL(( cal3_1), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_0_Boundary,device_Dist_1, n, B, r, 0, r + 1);
}
if(r !=0 && round-r-1 !=0){
dim3 nB((round-r-1),r);
//hipLaunchKernelGGL(( cal3), dim3(nB) ,dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, 0, r, round - r - 1);
hipLaunchKernelGGL(( cal3_1), dim3(nB) ,dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_0_Boundary, device_Dist_1, n, B, r, r + 1, 0);
}
if(round-r-1 !=0){
dim3 nB_p3(round - r - 1, round - r - 1);
//hipLaunchKernelGGL(( cal3), dim3(nB_p3), dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1);
hipLaunchKernelGGL(( cal3_1), dim3(nB_p3), dim3(num_threads), sizeof(int)*B*B*3 , 0, device_0_Boundary,device_Dist_1, n, B, r, r + 1, r + 1);
}
#ifdef DEBUG_DEVICE_DIST
hipMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After PHASE3, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
#ifdef DEBUG_DEVICE_DIST1
hipMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After PHASE3, device_Dist_1: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
} // end for(r=0; r<round; r++)
#ifdef TIME
hipSetDevice(0);
hipEventRecord(Commstart);
#endif
// Independently copy back to CPU
hipMemcpyAsync(Dist, device_Dist, n*device_0_Boundary*sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpyAsync(Dist+device_0_Boundary*n, device_Dist_1+device_0_Boundary*n, ( n*n - n*device_0_Boundary) *sizeof(unsigned int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#ifdef TIME
float Commtime_D2H;
hipEventRecord(Commstop);
hipEventSynchronize(Commstop); // WAIT until 'stop' complete.
hipEventElapsedTime(&Commtime_D2H, Commstart, Commstop);
printf("D2H copy took %.8f seconds\n",Commtime_D2H/1000);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime_D2H;
printf("Communication %.8f seconds\n",Total_comm_time/1000);
#endif
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop); // WAIT until 'stop' complete.
float time;
hipEventElapsedTime(&time, start, stop);
// printf("Took %.8f milliseconds",time);
printf("Computation(raw): Took %.8f seconds\n",(time)/1000);
printf("Computation: Took %.8f seconds\n",(time-Total_comm_time)/1000);
#endif
}
/* ================ For Small n ====================== */
/* Define small n cal & cal3 */
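// cal_small / cal3_small: same shared-memory tiling as cal / cal3 but without the
// device_Boundary argument. They are used by block_FW_MultiGPU_Old (the small-n path
// chosen in main() when n < B), and out-of-range threads are masked with plain
// i < n / j < n checks.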
__global__ void cal_small(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
int i = block_start_x*B + threadIdx.y;
int j = block_start_y*B + threadIdx.x;
if(i<n && j<n){
// S[ (i%B)*B + (j%B) ] = device_Dist[i*n + j];
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
// S[Addr(0, (i%B), (j%B), B)] = device_Dist[Addr(0,i,j,n)];
// S[ (i%B)*(B+1) + (j%(B+1)) ] = device_Dist[i*n + j];
// __syncthreads();
// This for-loop CANNOT be parallelized: iteration k depends on the results of iteration k-1.
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
for (int iter = 0; iter<B && Round*B+iter <n; iter++){
__syncthreads();
// if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)]);
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}// end if(i<n && j<n )
}
__global__ void cal3_small(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
// int i = block_start_y* B + blockIdx.y * B + threadIdx.y;
// int j = block_start_x* B + blockIdx.x * B + threadIdx.x;
int i = block_start_x* B + blockIdx.x * B + threadIdx.y;
int j = block_start_y* B + blockIdx.y * B + threadIdx.x;
// S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
// S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i<n && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
if(j<n && (Round*B + threadIdx.y)<n) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i<n && j<n){
// For each thread, calculate one edge.
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
__syncthreads();
// This for-loop CANNOT be parallelized!
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
/// KEY!! Don't USE % on K.
for (int iter = 0; iter<B && Round*B+iter <n; iter++){ //k = Round * B; k < (Round + 1) * B && k < n; ++k) {
// __syncthreads();
// if (S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)] < S[Addr(0, (i%B), (j%B), B)] ) {
// S[Addr(0, (i%B), (j%B), B)] = S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)];
// }
// i , k // k , j // i , j
// if (S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)] );
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}
}
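// block_FW_MultiGPU_Old: blocked Floyd-Warshall used for the small-n path (see main()).
// Per round: GPU 0 computes phases 1 and 2, the full matrix is peer-copied to GPU 1,
// phase 3 is split along the row direction (top part on GPU 0, bottom part on GPU 1),
// and GPU 1's rows are peer-copied back to GPU 0 before the next round.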
void block_FW_MultiGPU_Old(int B){
printf("Small n: \n");
printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B);
printf(" %d * %d block\n",B,B);
int round = ceil(n, B);
// int cur_device_number;
// hipMemcpy();
// int *device_Dist;
int *device_Dist;
int *device_Dist_1;
int canGPU0AccessGPU1, canGPU1AccessGPU0;
#ifdef TIME
float Total_comm_time = 0;
float Commtime;
hipEvent_t Commstart, Commstop;
hipEventCreate(&Commstart);
hipEventCreate(&Commstop);
#endif
#pragma omp parallel num_threads(NUM_THREAD) //reduction(+:pixels)
{
int omp_id, omp_thread_num;
omp_id = omp_get_thread_num();
omp_thread_num = omp_get_num_threads();
if(omp_id==0){
hipSetDevice(0);
hipDeviceCanAccessPeer ( &canGPU0AccessGPU1, device_0, device_1 );
if(canGPU0AccessGPU1==1){
printf("Can 0 access 1? %d\n",canGPU0AccessGPU1);
hipDeviceEnablePeerAccess ( device_1, cudaEnablePeerAccess_Flags );
hipMalloc(&device_Dist, n * n* sizeof(unsigned int));
#ifdef TIME
hipEventRecord(Commstart);
#endif
hipMemcpyAsync(device_Dist, Dist, n* n*sizeof(unsigned int), hipMemcpyHostToDevice);
printf("omp t%d allocate & copy gpu 0\n",omp_id);
}
else{
printf("Error, gpu 0 cannot directly access gpu 1\n");
// return 2;
}
}
else{
hipSetDevice(1);
hipDeviceCanAccessPeer ( &canGPU1AccessGPU0, device_1, device_0 );
if(canGPU1AccessGPU0==1){
printf("Can 1 access 0? %d\n",canGPU1AccessGPU0);
hipDeviceEnablePeerAccess ( device_0, cudaEnablePeerAccess_Flags );
// hipGetDevice(&cur_device_number);
hipMalloc(&device_Dist_1, n * n* sizeof(unsigned int));
// hipMemcpyAsync(device_Dist_1, Dist, n* n*sizeof(unsigned int), hipMemcpyHostToDevice);
printf("omp t%d allocate & copy gpu 1\n",omp_id);
}
else{
printf("Error, gpu 1 cannot directly access gpu 0\n");
// return 2;
}
}
}
#ifdef TIME
hipEventRecord(Commstop);
hipEventSynchronize(Commstop); // WAIT until 'stop' complete.
hipEventElapsedTime(&Commtime, Commstart, Commstop);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime;
#endif
#ifdef TIME
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
#endif
// 2*2 threadIdx.x from 0 to 1, Idx.y from 0 to 1
dim3 num_threads(B,B);
for (int r = 0; r < round; ++r) {
#ifdef DEBUG_DIST
printf("========== Round %d ================\n",r);
// print_Dist(n);
#endif
// printf("%d %d\n", r, round);
fflush(stdout);
/* Phase 1*/
// EX: 3*3 Blocks. At iteration k (round r), send D(r,r)
// cal<<< 1, num_threads , sizeof(int)*B*(B+1)>>> (device_Dist, n, B, r, r, r, 1, 1);
hipSetDevice(0);
hipLaunchKernelGGL(( cal_small), dim3(1), dim3(num_threads) , sizeof(int)*B*B, 0, device_Dist, n, B, r, r, r);
// hipDeviceSynchronize();
// // printf("round %d Phase1: (%d, %d), Each row copy: %d entries. \n",r, r, r, min(B, n-r*B));
// for(int i= r*B; i<(r+1)*B && i<n ; i++){
// // printf("Acutal starting location: (%d, %d). MEM[%d]\n",i, r*B, (i*n+r*B));
// hipMemcpyPeer(device_Dist_1+(i*n+r*B),1, device_Dist+(i*n+r*B),0, min(B, n-r*B)*sizeof(unsigned int));
// }
// hipDeviceSynchronize();
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After phase1, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
/* Phase2 */
// Width: j direction
// Height: i direction
////////////// WIDTH blocks (height == 1) /////////////////
// GPU 0
// 2-1
if(r !=0){
dim3 nB(1,r);
hipLaunchKernelGGL(( cal3_small), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist, n, B, r, r, 0);
}
// 2-2
if(round -r-1 !=0){
dim3 nB(1,round - r - 1);
hipLaunchKernelGGL(( cal3_small), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r, r + 1);
}
//2-3
if(r!=0){
dim3 nB(r,1);
hipLaunchKernelGGL(( cal3_small), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist, n, B, r, 0, r);
}
// 2-4
if(round-r-1 !=0) {
dim3 nB(round - r - 1,1);
hipLaunchKernelGGL(( cal3_small), dim3(nB) , dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, r + 1, r);
}
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After gpu 0, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
//////////// HEIGHT blocks (width == 1) /////////////
// // GPU 1
// hipSetDevice(1);
// // 2-3
// if(r!=0){
// dim3 nB(r,1);
// hipLaunchKernelGGL(( cal3), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3, 0, device_Dist_1, n, B, r, 0, r);
// }
// // 2-4
// if(round-r-1 !=0) {
// dim3 nB(round - r - 1,1);
// hipLaunchKernelGGL(( cal3), dim3(nB) , dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist_1, n, B, r, r + 1, r);
// }
// // Copy device_Dist_1 to Dist_1 and print out!
// #ifdef DEBUG_DEVICE_DIST1
// hipDeviceSynchronize();
// hipMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
// printf("After gpu 1, device_Dist_1: \n");
// for(int i=0; i<n; i++){
// for(int j=0; j<n; j++){
// if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
// else printf("%d ",Dist_1[i*n+j]);
// }
// }
// #endif
// PHASE 2 COPY From gpu0 to gpu1
#ifdef TIME
hipEventRecord(Commstart);
#endif
hipMemcpyPeerAsync(device_Dist_1,1, device_Dist,0, n*n*sizeof(unsigned int));
hipDeviceSynchronize();
#ifdef TIME
hipEventRecord(Commstop);
hipEventSynchronize(Commstop); // WAIT until 'stop' complete.
hipEventElapsedTime(&Commtime, Commstart, Commstop);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime;
#endif
// Copy device_Dist to Dist_1 and print out!
#ifdef DEBUG_DEVICE_DIST
hipMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After Copy from gpu 1 to gpu 0, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
/* Phase 3*/ // => USE 2D block!
// Compute the remaining blocks:
// blocks that overlap the pivot block in neither the x nor the y direction.
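// Naming: _3_<sub-phase>_<device>_*: each of the four phase-3 regions (3-1 .. 3-4) is split
// along i into a top part computed on device 0 and a bottom part computed on device 1;
// *_start_i and *_block_height are measured in blocks, not matrix rows
// (e.g. _3_2_1_start_i is the starting block row of device 1's share of sub-phase 3-2).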
// 3-1:
// From (0,0) do (r, r) Blocks.
int _3_1_block_idx_j = 0;
int _3_1_0_start_i = 0;
int _3_1_0_block_height = ceil(r , 2);
// int 3_4_0_block_width = r;
int _3_1_1_start_i = _3_1_0_start_i + _3_1_0_block_height ;
int _3_1_1_block_height = r - _3_1_0_block_height;
hipSetDevice(0);
if(r != 0){
dim3 nB(_3_1_0_block_height,r);
hipLaunchKernelGGL(( cal3_small), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, _3_1_0_start_i, _3_1_block_idx_j);
}
hipSetDevice(1);
if(r != 0 && _3_1_1_block_height!=0 ){
dim3 nB(_3_1_1_block_height,r);
hipLaunchKernelGGL(( cal3_small), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist_1, n, B, r, _3_1_1_start_i, _3_1_block_idx_j);
}
// 3-2
// From (0, r+1) do (r * round-r-1 ) BLOCKS
int _3_2_block_idx_j = r+1;
int _3_2_0_start_i = 0; // Note, in Block_Idx, NOT Actual index i in Dist!
int _3_2_0_block_height = ceil(r , 2);
// int 3_2_0_block_width = round-r-1 ;
int _3_2_1_start_i = _3_2_0_start_i + _3_2_0_block_height ;
int _3_2_1_block_height = r - _3_2_0_block_height;
// int _3_2_block_width = (round-r-1 );
hipSetDevice(0);
if(r !=0 && (round-r-1) !=0 && _3_2_block_idx_j<n ){
dim3 nB(_3_2_0_block_height,(round-r-1));
hipLaunchKernelGGL(( cal3_small), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, _3_2_0_start_i, _3_2_block_idx_j);
}
hipSetDevice(1);
if(r !=0 && (round-r-1) !=0 && _3_2_block_idx_j<n && _3_2_1_block_height!=0 ){
dim3 nB(_3_2_1_block_height,(round-r-1));
hipLaunchKernelGGL(( cal3_small), dim3(nB), dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist_1, n, B, r, _3_2_1_start_i, _3_2_block_idx_j);
}
// 3-3
// From (r+1, 0) DO (round-r-1) * r Blocks!
int _3_3_block_idx_j = 0;
int _3_3_0_start_i = r+1; // Note, in Block_Idx, NOT Actual index i in Dist!
int _3_3_0_block_height = ceil(round-r-1 , 2);
// int 3_4_0_block_width = r;
int _3_3_1_start_i = _3_3_0_start_i + _3_3_0_block_height ;
int _3_3_1_block_height = (round-r-1) - _3_3_0_block_height;
hipSetDevice(0);
if(r !=0 && round-r-1 !=0){
dim3 nB(_3_3_0_block_height,r);
hipLaunchKernelGGL(( cal3_small), dim3(nB) ,dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, _3_3_0_start_i, _3_3_block_idx_j );
}
hipSetDevice(1);
if(r !=0 && round-r-1 !=0 && _3_3_1_block_height!=0 ){
dim3 nB(_3_3_1_block_height,r);
hipLaunchKernelGGL(( cal3_small), dim3(nB) ,dim3(num_threads) , sizeof(int)*B*B*3 , 0, device_Dist_1, n, B, r, _3_3_1_start_i, _3_3_block_idx_j );
}
// 3-4:
// From (r+1, r+1) do (round-r-1) * (round-r-1) blocks
int _3_4_block_idx_j = r+1;
int _3_4_0_start_i = r + 1;
int _3_4_0_block_height = ceil(round-r-1 , 2);
// int 3_4_0_block_width = (round-r-1);
// 3-4-1: from (r+1+ ceil(round-r-1, 2), r+1) Compute {(round-r-1) - ceil(round-r-1 , 2)} * (round-r-1) Blocks.
int _3_4_1_start_i = _3_4_0_start_i + _3_4_0_block_height;
int _3_4_1_block_height = (round-r-1) - _3_4_0_block_height;
// int 3_4_1_block_width = (round-r-1);
// 3-4-0: from (r+1, r+1). Compute ceil(round-r-1 , 2) * (round-r-1) Blocks.
hipSetDevice(0);
if(round-r-1 !=0 && _3_4_block_idx_j<n ){
dim3 nB_p3(_3_4_0_block_height, round - r - 1);
hipLaunchKernelGGL(( cal3_small), dim3(nB_p3), dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist, n, B, r, _3_4_0_start_i, _3_4_block_idx_j);
}
hipSetDevice(1);
if(round-r-1 !=0 && _3_4_block_idx_j<n && _3_4_1_block_height!=0 ){
dim3 nB_p3(_3_4_1_block_height, round - r - 1);
hipLaunchKernelGGL(( cal3_small), dim3(nB_p3), dim3(num_threads), sizeof(int)*B*B*3 , 0, device_Dist_1, n, B, r, _3_4_1_start_i, _3_4_block_idx_j);
}
hipDeviceSynchronize();
hipSetDevice(0);
#ifdef TIME
hipEventRecord(Commstart);
#endif
/* --------- Copy: gpu 1-> gpu0 ----------- */
// 3-1-1
if(r !=0 && _3_1_1_block_height!=0 ){
#ifdef DEBUG_PHASE3
printf("round %d Phase3-1: GPU 1 copy from (%d, %d), Each row copy: %d entries. \n",r, _3_1_1_start_i, _3_1_block_idx_j, (r-0)*B );
#endif
for(int i= _3_1_1_start_i*B ; i<( _3_1_1_start_i + _3_1_1_block_height) *B && i<n ; i++){ // row-wise copy. from (0, r) Block_width = 1, Block_height=r
#ifdef DEBUG_PHASE3
printf("Actual from (%d, %d), MEM[%d]. \n",i, _3_1_block_idx_j*B, (i*n+ _3_1_block_idx_j*B));
#endif
hipMemcpyPeerAsync(device_Dist+(i*n+ _3_1_block_idx_j*B),0, device_Dist_1+(i*n+ _3_1_block_idx_j*B),1 , (r-0)*B *sizeof(unsigned int));
}
}
// 3-2-1
if(r !=0 && (round-r-1) !=0 && _3_2_block_idx_j<n && _3_2_1_block_height!=0 ){
#ifdef DEBUG_PHASE3
printf("round %d Phase3-2: GPU 1 copy from (%d, %d), Each row copy: %d entries. \n",r, _3_2_1_start_i, _3_2_block_idx_j, (n-_3_2_block_idx_j*B) );
#endif
for(int i= _3_2_1_start_i*B ; i<( _3_2_1_start_i + _3_2_1_block_height) *B && i<n ; i++){ // row-wise copy. from (0, r) Block_width = 1, Block_height=r
#ifdef DEBUG_PHASE3
printf("Actual from (%d, %d), MEM[%d]. \n",i, _3_2_block_idx_j*B, i*n+ _3_2_block_idx_j*B );
#endif
hipMemcpyPeerAsync(device_Dist+(i*n+ _3_2_block_idx_j*B),0, device_Dist_1+(i*n+ _3_2_block_idx_j*B),1 , (n-_3_2_block_idx_j*B) *sizeof(unsigned int));
}
}
// 3-3-1
if(r !=0 && (round-r-1) !=0 && _3_3_1_block_height!=0){
#ifdef DEBUG_PHASE3
printf("round %d Phase3-3: GPU 1 copy from (%d, %d), Each row copy: %d entries. \n",r, _3_3_1_start_i, _3_3_block_idx_j, (r-0)*B );
#endif
for(int i= _3_3_1_start_i*B ; i<( _3_3_1_start_i + _3_3_1_block_height) *B && i<n ; i++){ // row-wise copy. from (0, r) Block_width = 1, Block_height=r
#ifdef DEBUG_PHASE3
printf("Actual from (%d, %d), MEM[%d]. \n",i, _3_3_block_idx_j*B, (i*n+ _3_3_block_idx_j*B));
#endif
hipMemcpyPeerAsync(device_Dist+(i*n+ _3_3_block_idx_j*B),0, device_Dist_1+(i*n+ _3_3_block_idx_j*B),1 , (r-0)*B *sizeof(unsigned int));
}
}
// 3-4-1
if(round-r-1 !=0 && _3_4_block_idx_j<n && _3_4_1_block_height!=0 ){
#ifdef DEBUG_PHASE3
printf("round %d Phase3-4: GPU 1 copy from (%d, %d), Each row copy: %d entries. \n",r, _3_4_1_start_i, r+1, (n-(r+1)*B ));
#endif
for(int i= _3_4_1_start_i*B; i< (_3_4_1_start_i+ _3_4_1_block_height)*B && i<n ; i++){ // row-wise copy. from (0, r) Block_width = 1, Block_height=r
#ifdef DEBUG_PHASE3
printf("Actual from (%d, %d), MEM[%d]. \n",i, (r+1)*B, (i*n+ (r+1)*B));
#endif
hipMemcpyPeerAsync(device_Dist+(i*n+ _3_4_block_idx_j*B),0, device_Dist_1+(i*n+ _3_4_block_idx_j*B),1, (n- _3_4_block_idx_j*B )*sizeof(unsigned int));
}
}
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
hipMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("After phase3 copy from gpu1 to gpu0, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
printf("\n end round %d \n ===============================\n",r);
#endif
hipDeviceSynchronize();
#ifdef TIME
hipEventRecord(Commstop);
hipEventSynchronize(Commstop); // WAIT until 'stop' complete.
hipEventElapsedTime(&Commtime, Commstart, Commstop);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime;
#endif
} // end for(r=0; r<round; r++)
#ifdef TIME
hipEventRecord(stop);
hipEventSynchronize(stop); // WAIT until 'stop' complete.
float time;
hipEventElapsedTime(&time, start, stop);
// printf("Took %.8f milliseconds",time);
printf("Computation: Took %.8f seconds\n",(time-Total_comm_time)/1000);
#endif
#ifdef TIME
hipEventRecord(Commstart);
#endif
hipMemcpyAsync(Dist, device_Dist, n * n *sizeof(unsigned int), hipMemcpyDeviceToHost);
#ifdef TIME
hipEventRecord(Commstop);
hipEventSynchronize(Commstop); // WAIT until 'stop' complete.
hipEventElapsedTime(&Commtime, Commstart, Commstop);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime;
printf("Communication %.8f seconds\n",Total_comm_time/1000);
#endif
// hipDeviceSynchronize(); // TODO : Can remove this
}
|
9f1af01723d27095592eae1cb1a8898a02b18e69.cu
|
// This version: c04: 6.45 seconds.
/*
This is the "no streaming" version.
0102 TODOs:
(V) 1. Correctness
// c01~07, p31~36
// c06 and after: TOO SLOW to get an answer
(V) 2. Larger Blocking_Factor B
( ) 3. Initial padding (Remove if(i<n && j<n))
( ) 4. Asynchronous Peer Copy
( ) 5. Use different streams in cudaMemcpyPeerAsync! (stream 0~3)
( ) 6. Less cudaDeviceSynchronize();
( ) 7. #pragma omp parallel
(V) N. Hide printf into #ifdef DEBUG_PHASE1, PHASE2, PHASE3
*/
// System includes
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <omp.h>
#include <time.h>
#define TIME
// #define CUDA_NVPROF
// #define DEBUG_DIST
// #define DEBUG_DEVICE_DIST
// #define DEBUG_DEVICE_DIST1
// #define DEBUG_PHASE1
// #define DEBUG_PHASE2
// #define DEBUG_PHASE3
// #define CHECK_CORRECTNESS
const int BLOCKING_FACTOR = 32; // 32, 16, 8, 4, 2
const int INF = ((1 << 30) - 1);
// Global var stored in Data Section.
// const int V = 40010;
void input(char* inFileName);
void output(char* outFileName);
void print_ans(int num_V, char* ans_file);
void print_Dist(int num_V);
void block_FW(int B);
// void block_FW_small_n(int B);
void block_FW_MultiGPU_Old(int B);
void block_FW_MultiGPU(int B);
int ceil(int a, int b); // min num that >= a/b
// floor: max num <= a/b
int floor(int a, int b);
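// Addr: flattens (matrixIdx, i, j) into a linear offset, treating memory as a stack of N*N
// matrices. matrixIdx is 0 for the global Dist matrix (N = n) and 0/1/2 for the three B*B
// tiles kept in shared memory (N = B).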
__device__ inline int Addr(int matrixIdx, int i, int j, int N){
return( N*N*matrixIdx + i*N + j);
}
// W: width, H: height
// __device__ inline int Addr2(int matrixIdx, int i, int j, int W, int H){
// return( W*H*matrixIdx + i*W + j);
// }
// device_Boundary: the row index (i direction) at which the data is initially split between the two GPUs.
// PHASE 1: ONE block does the round's k iterations with B*B threads.
// __global__ void cal(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){
__global__ void cal(int device_Boundary, int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
int i = block_start_x*B + threadIdx.y;
int j = block_start_y*B + threadIdx.x;
if(i<device_Boundary && j<n){
// S[ (i%B)*B + (j%B) ] = device_Dist[i*n + j];
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
// S[Addr(0, (i%B), (j%B), B)] = device_Dist[Addr(0,i,j,n)];
// S[ (i%B)*(B+1) + (j%(B+1)) ] = device_Dist[i*n + j];
// __syncthreads();
// This for-loop CANNOT be parallelized: iteration k depends on the results of iteration k-1.
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
for (int iter = 0; iter<B && Round*B+iter <n; iter++){
__syncthreads();
// if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)]);
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}// end if(i<n && j<n )
}
// Phase 1 kernel for device 1: used when the pivot block lies in rows i >= device_Boundary.
__global__ void cal_1(int device_Boundary, int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
int i = block_start_x*B + threadIdx.y;
int j = block_start_y*B + threadIdx.x;
if(i >= device_Boundary && i<n && j<n){
// S[ (i%B)*B + (j%B) ] = device_Dist[i*n + j];
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
// S[Addr(0, (i%B), (j%B), B)] = device_Dist[Addr(0,i,j,n)];
// S[ (i%B)*(B+1) + (j%(B+1)) ] = device_Dist[i*n + j];
// __syncthreads();
// This for-loop CANNOT be parallelized: iteration k depends on the results of iteration k-1.
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
for (int iter = 0; iter<B && Round*B+iter <n; iter++){
__syncthreads();
// if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)]);
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}// end if(i<n && j<n )
}
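// cal3: phase-2/3 kernel for device 0 (only rows i < device_Boundary are updated).
// Shared memory holds three B*B tiles: S[0] is this block's tile D(i,j), S[1] is the tile
// D(i, k-range) from the pivot block-column, and S[2] is the tile D(k-range, j) from the
// pivot block-row. Each thread relaxes one entry: D(i,j) = min(D(i,j), D(i,k) + D(k,j)).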
// __global__ void cal3(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){
__global__ void cal3(int device_Boundary, int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
// int i = block_start_y* B + blockIdx.y * B + threadIdx.y;
// int j = block_start_x* B + blockIdx.x * B + threadIdx.x;
int i = block_start_x* B + blockIdx.x * B + threadIdx.y;
int j = block_start_y* B + blockIdx.y * B + threadIdx.x;
// S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
// S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i<device_Boundary && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
if(j<n && (Round*B + threadIdx.y)<n) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i<device_Boundary && j<n){
// For each thread, calculate one edge.
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
__syncthreads();
// This for-loop CANNOT be parallelized!
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
/// KEY!! Don't USE % on K.
for (int iter = 0; iter<B && Round*B+iter <n; iter++){ //k = Round * B; k < (Round + 1) * B && k < n; ++k) {
// __syncthreads();
// if (S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)] < S[Addr(0, (i%B), (j%B), B)] ) {
// S[Addr(0, (i%B), (j%B), B)] = S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)];
// }
// i , k // k , j // i , j
// if (S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)] );
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}
}
// Phase 2/3 kernel for device 1: only rows i >= device_Boundary are updated.
__global__ void cal3_1(int device_Boundary, int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
int i = block_start_x* B + blockIdx.x * B + threadIdx.y;
int j = block_start_y* B + blockIdx.y * B + threadIdx.x;
if( i<n && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
if((Round*B + threadIdx.y)<n && j<n ) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i>=device_Boundary && i<n && j<n){
// For each thread, calculate one edge.
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
__syncthreads();
for (int iter = 0; iter<B && Round*B+iter <n; iter++){ //k = Round * B; k < (Round + 1) * B && k < n; ++k) {
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)] );
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}
}
int MAX_GPU_COUNT = 32;
int n, m;
// static int Dist[V][V];
int* Dist;
int * Dist_1;
int main(int argc, char* argv[]) {
#ifdef TIME
// struct timespec start, end, temp;
struct timespec total_starttime;
struct timespec total_temp;
struct timespec start;
struct timespec end;
struct timespec temp;
double IO_time=0.0;
double Total_time = 0.0;
clock_gettime(CLOCK_MONOTONIC, &total_starttime);
clock_gettime(CLOCK_MONOTONIC, &start);
#endif
input(argv[1]);
#ifdef DEBUG_DEVICE_DIST
Dist_1 = (int*)malloc(sizeof(unsigned int)*n*n);
#endif
#ifdef TIME
clock_gettime(CLOCK_MONOTONIC, &end);
if ((end.tv_nsec - start.tv_nsec) < 0) {
temp.tv_sec = end.tv_sec-start.tv_sec-1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0;
#endif
// printf("%f second on input\n", time_used);
// we have num_v, num_e, adj_matrix (Dist[V][V]) now
// int B = 512;
// Note: Since B*B threads, maximum B : 32 (MAX 1024 threads per block)
int B;
B = BLOCKING_FACTOR;
if(n < B){
block_FW_MultiGPU_Old(B);
}
else{
block_FW_MultiGPU(B);
}
#ifdef TIME
clock_gettime(CLOCK_MONOTONIC, &start);
#endif
output(argv[2]);
#ifdef TIME
clock_gettime(CLOCK_MONOTONIC, &end);
// IO Time
if ((end.tv_nsec - start.tv_nsec) < 0) {
temp.tv_sec = end.tv_sec-start.tv_sec-1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
// Total Time
if ((end.tv_nsec - total_starttime.tv_nsec) < 0) {
total_temp.tv_sec = end.tv_sec-total_starttime.tv_sec-1;
total_temp.tv_nsec = 1000000000 + end.tv_nsec - total_starttime.tv_nsec;
} else {
total_temp.tv_sec = end.tv_sec - total_starttime.tv_sec;
total_temp.tv_nsec = end.tv_nsec - total_starttime.tv_nsec;
}
IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0;
Total_time = total_temp.tv_sec + (double) total_temp.tv_nsec / 1000000000.0;
#endif
#ifdef TIME
printf("IO Time: %.8f seconds\n", IO_time);
printf("Total Time: %.8f seconds\n",Total_time);
#endif
printf("========== Comparing results... ===========\n");
#ifdef DEBUG_DIST
print_Dist(n);
#endif
#ifdef CHECK_CORRECTNESS
print_ans(n, argv[3]);
#endif
printf("Job Finished\n");
return 0;
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file); // n = num_vertices
fread(&m, sizeof(int), 1, file); // m = num_edges
printf("V: %d, E: %d\n",n,m);
Dist = (int*) malloc(sizeof(int)*n*n);
// Initialize adjacency matrix
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) {
Dist[i*n+j] = 0;
// Dist[i][j] = 0;
} else {
Dist[i*n+j] = INF;
// Dist[i][j] = INF;
}
}
}
// Sequentially read input edges and fill them into adj matrix.
int pair[3];
for (int i = 0; i < m; ++i) {
fread(pair, sizeof(int), 3, file);
// Dist[pair[0]][pair[1]] = pair[2];
Dist[ pair[0]*n+ pair[1]] = pair[2];
}
fclose(file);
}
void print_ans(int num_V, char* ans_file){
bool wrong = false;
FILE* file = fopen(ans_file, "rb");
int* Ans = (int*)malloc(sizeof(int)*n*n);
fread(Ans, sizeof(int), n*n, file);
if(num_V > 15) num_V = 15;
for(int i=0; i<num_V*num_V; i++){
if(Dist[i] != Ans[i]){
wrong = true;
printf("Wrong at offset %d, expected %d but get %d\n", i*4, Ans[i], Dist[i]);
printf("Fron %d to %d , cost: %d\n", (i/n), (i%n), Ans[i] );
}
// printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]);
}
if(!wrong) printf(" ======= Congratulations! =========\n");
printf("======== Your Dist ==========\n");
for(int i=0;i<num_V; i++){
for(int j=0; j<num_V; j++){
if(j==num_V-1) printf("%d\n",Dist[i*num_V+j]);
else printf("%d ", Dist[i*num_V+j]);
}
// printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]);
}
printf("======== ANSWER ==========\n");
for(int i=0;i<num_V; i++){
for(int j=0; j<num_V; j++){
if(j==num_V-1) printf("%d\n",Ans[i*num_V+j]);
else printf("%d ", Ans[i*num_V+j]);
}
// printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]);
}
}
void print_Dist(int num_V){
printf("========= Dist ============\n");
for(int i=0;i<num_V; i++){
for(int j=0; j<num_V; j++){
if(j==num_V-1) printf("%d\n",Dist[i*num_V+j]);
else printf("%d ", Dist[i*num_V+j]);
}
// printf("offset %d val %d, ans: %d\n", i*4, Dist[i], Ans[i]);
}
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i*n+j] >= INF) Dist[i*n+j] = INF;
}
fwrite(Dist+i*n, sizeof(int), n, outfile);
}
fclose(outfile);
}
int ceil(int a, int b) { return (a + b - 1) / b; }
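// e.g. ceil(999, 32) == 32: 999 vertices need 32 blocks of side 32 along each dimension.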
int floor(int a, int b){ return a/b >>1<<1; } // rounds a/b down to the nearest even number (clears the LSB of a/b)
// 1204: Idea 1: one stream with 9 serialized kernel launches?
// memory to pass to GPU: B, r, r, r, 1, 1. ALL constant! No memory copy.
const int device_0 = 0;
const int device_1 = 1;
const int cudaEnablePeerAccess_Flags = 0;
#define NUM_THREAD 2
// For large n: don't use cudaDeviceSynchronize() inside the round loop.
// n > 5000
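// block_FW_MultiGPU: blocked Floyd-Warshall over round = ceil(n, B) rounds.
// Each round r has three phases:
//   Phase 1: relax the pivot block D(r,r);
//   Phase 2: relax the blocks in the pivot block-row and pivot block-column;
//   Phase 3: relax all remaining blocks.
// GPU 0 updates rows [0, device_0_Boundary) and GPU 1 updates rows [device_0_Boundary, n);
// the only inter-GPU traffic per round is the peer copy of the pivot block-row in phase 1.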
void block_FW_MultiGPU(int B) {
printf("Large n : \n");
printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B);
printf(" %d * %d block\n",B,B);
int round = ceil(n, B);
// int cur_device_number;
int *device_Dist;
int *device_Dist_1;
int canGPU0AccessGPU1, canGPU1AccessGPU0;
int device_0_Boundary = ceil(n, 2); // e.g. 5/2 -> 3, 160/2 -> 80.
printf("ceil(%d, 2) :%d\n",n,device_0_Boundary);
printf("ceil % B remainder : %d\n",device_0_Boundary%B);
// Avoid cross pivot.
// 80 % 32 = 16. => (80 - 16 + 32) = 96.
// ceil(999,2) = 500, 500 % 32 = 20. 500 - 20 + 32 = 512.
if( device_0_Boundary%B !=0) device_0_Boundary = (device_0_Boundary- (device_0_Boundary%B) + B);
printf("device_0_Boundary: %d\n",device_0_Boundary);
// Record Computation time
#ifdef TIME
cudaSetDevice(0);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
#endif
#ifdef TIME
float Total_comm_time = 0;
cudaSetDevice(1);
cudaEvent_t Commstart_device_1, Commstop_device_1;
cudaEventCreate(&Commstart_device_1);
cudaEventCreate(&Commstop_device_1);
cudaSetDevice(0);
cudaEvent_t Commstart, Commstop;
cudaEventCreate(&Commstart);
cudaEventCreate(&Commstop);
cudaEventRecord(Commstart);
#endif
// Data Partition 1 : Split Top to device 0
// Bottom to device 1.
#pragma omp parallel num_threads(NUM_THREAD) //reduction(+:pixels)
{
int omp_id, omp_thread_num;
omp_id = omp_get_thread_num();
omp_thread_num = omp_get_num_threads();
if(omp_id==0){
cudaSetDevice(0);
cudaDeviceCanAccessPeer ( &canGPU0AccessGPU1, device_0, device_1 );
if(canGPU0AccessGPU1==1){
printf("Can 0 access 1? %d\n",canGPU0AccessGPU1);
cudaDeviceEnablePeerAccess ( device_1, cudaEnablePeerAccess_Flags );
cudaMalloc(&device_Dist, n * n* sizeof(unsigned int));
#ifdef TIME
cudaEventRecord(Commstart);
#endif
cudaMemcpyAsync(device_Dist, Dist, n* n*sizeof(unsigned int), cudaMemcpyHostToDevice);
// cudaMemcpyAsync(device_Dist, Dist, n*device_0_Boundary*sizeof(unsigned int), cudaMemcpyHostToDevice);
printf("omp t%d allocate & copy gpu 0\n",omp_id);
}
else{
printf("Error, gpu 0 cannot directly access gpu 1\n");
// return 2;
}
}
else{
cudaSetDevice(1);
cudaDeviceCanAccessPeer ( &canGPU1AccessGPU0, device_1, device_0 );
if(canGPU1AccessGPU0==1){
printf("Can 1 access 0? %d\n",canGPU1AccessGPU0);
cudaDeviceEnablePeerAccess ( device_0, cudaEnablePeerAccess_Flags );
// cudaGetDevice(&cur_device_number);
cudaMalloc(&device_Dist_1, n * n* sizeof(unsigned int));
cudaMemcpyAsync(device_Dist_1, Dist, n* n*sizeof(unsigned int), cudaMemcpyHostToDevice);
// cudaMemcpyAsync(device_Dist_1+device_0_Boundary*n, Dist+device_0_Boundary*n, ( n*n -n*device_0_Boundary)*sizeof(unsigned int), cudaMemcpyHostToDevice);
printf("omp t%d allocate & copy gpu 1\n",omp_id);
}
else{
printf("Error, gpu 1 cannot directly access gpu 0\n");
// return 2;
}
}
}
#ifdef TIME
float Commtime;
cudaSetDevice(0);
cudaEventRecord(Commstop);
cudaEventSynchronize(Commstop); // WAIT until 'stop' complete.
cudaEventElapsedTime(&Commtime, Commstart, Commstop);
printf("H2D copy took %.8f seconds\n",Commtime/1000);
Total_comm_time += Commtime;
#endif
#ifdef DEBUG_DEVICE_DIST
printf("========== Initial Condition =========\n");
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("Initial, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
#ifdef DEBUG_DEVICE_DIST1
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("Initial, device_Dist_1: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
dim3 num_threads(B,B);
for (int r = 0; r < round; ++r) {
#ifdef DEBUG_DIST
printf("========== Round %d ================\n",r);
// print_Dist(n);
#endif
// printf("%d %d\n", r, round);
fflush(stdout);
/* Phase 1*/
if(r*B < device_0_Boundary) { // Device 0 owns the pivot block
// printf("Pivot at GPU 0!\n");
cudaSetDevice(0);
cal<<< 1, num_threads , sizeof(int)*B*B>>> (device_0_Boundary, device_Dist, n, B, r, r, r);
#ifdef TIME
cudaEventRecord(Commstart);
#endif
// Copy WHOLE pivot ROW to the other device.
for(int i= r*B; i<(r+1)*B && i<n; i++)
cudaMemcpyPeer(device_Dist_1+i*n,1, device_Dist+i*n,0 , n*sizeof(unsigned int));
#ifdef TIME
float Commtime_Phase1;
cudaEventRecord(Commstop);
cudaEventSynchronize(Commstop); // WAIT until 'stop' complete.
cudaEventElapsedTime(&Commtime_Phase1, Commstart, Commstop);
// printf("Phase1 mem copy took %.8f seconds\n",Commtime_Phase1/1000);
Total_comm_time += Commtime_Phase1;
#endif
}
else{ // Device 1 owns the pivot block; compute, then copy to the other device.
cudaSetDevice(1);
// printf("Pivot at GPU 1!\n");
cal_1<<< 1, num_threads , sizeof(int)*B*B>>> (device_0_Boundary, device_Dist_1, n, B, r, r, r);
// Copy pivot ROW to the other device.
#ifdef TIME
cudaEventRecord(Commstart_device_1);
#endif
for(int i= r*B; i<(r+1)*B && i<n; i++)
cudaMemcpyPeer(device_Dist+i*n,0, device_Dist_1+i*n,1 , n*sizeof(unsigned int));
#ifdef TIME
float Commtime_Phase1;
cudaEventRecord(Commstop_device_1);
cudaEventSynchronize(Commstop_device_1); // WAIT until 'stop' complete.
cudaEventElapsedTime(&Commtime_Phase1, Commstart_device_1, Commstop_device_1);
// printf("Phase1 mem copy took %.8f seconds\n",Commtime_Phase1/1000);
Total_comm_time += Commtime_Phase1;
#endif
}
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After phase1, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
#ifdef DEBUG_DEVICE_DIST1
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After phase1, device_Dist_1: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
/* ----------- Phase 2 ------------- */
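// Phase 2: each GPU relaxes the pivot-row / pivot-column blocks that intersect its own row
// range; the i < device_Boundary / i >= device_Boundary checks inside cal3 / cal3_1 enforce
// the split, and the launch lists below skip strips that cannot touch a GPU's rows.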
cudaSetDevice(0);
// Compute four sub-phase
if(r*B < device_0_Boundary){
// TODO : Modify cal() and cal3() : Need to pass boundary into!!
// 2-1
if(r !=0){
dim3 nB(1,r);
cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_0_Boundary, device_Dist, n, B, r, r, 0);
}
// 2-2
if(round -r-1 !=0){
dim3 nB(1,round - r - 1);
cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_0_Boundary, device_Dist, n, B, r, r, r + 1);
}
//2-3
if(r!=0){
dim3 nB(r,1);
cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_0_Boundary, device_Dist, n, B, r, 0, r);
}
// 2-4
if(round-r-1 !=0) {
dim3 nB(round - r - 1,1);
cal3<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_0_Boundary, device_Dist, n, B, r, r + 1, r);
}
}
// Compute ONLY 2-3
else{
//2-3
if(r!=0){
dim3 nB(r,1);
cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_0_Boundary, device_Dist, n, B, r, 0, r);
}
}
cudaSetDevice(1);
// Compute ONLY 2-4
if(r*B < device_0_Boundary){
// 2-4
if(round-r-1 !=0) {
dim3 nB(round - r - 1,1);
cal3_1<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_0_Boundary, device_Dist_1, n, B, r, r + 1, r);
}
}
// Compute four sub-phase
else{
// 2-1
if(r !=0){
dim3 nB(1,r);
cal3_1<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_0_Boundary, device_Dist_1, n, B, r, r, 0);
}
// 2-2
if(round -r-1 !=0){
dim3 nB(1,round - r - 1);
cal3_1<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_0_Boundary, device_Dist_1, n, B, r, r, r + 1);
}
//2-3
if(r!=0){
dim3 nB(r,1);
cal3_1<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_0_Boundary, device_Dist_1, n, B, r, 0, r);
}
// 2-4
if(round-r-1 !=0) {
dim3 nB(round - r - 1,1);
cal3_1<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_0_Boundary, device_Dist_1, n, B, r, r + 1, r);
}
}
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After PHASE 2, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
#ifdef DEBUG_DEVICE_DIST1
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After PHASE 2, device_Dist_1: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
/* ----------- Phase 3 ------------- */
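// Phase 3: each GPU relaxes all remaining blocks, restricted to its own row range by the
// boundary checks inside cal3 / cal3_1. No peer copy is needed after this phase; the next
// round only requires the other GPU's updated pivot block-row, which is exchanged in phase 1.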
cudaSetDevice(0);
if(r != 0){
dim3 nB(r,r);
// cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, 0, r, r);
cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_0_Boundary, device_Dist, n, B, r, 0, 0);
}
if(r !=0 && (round-r-1) !=0){
dim3 nB(r,(round-r-1));
// cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r + 1, round - r - 1, r);
cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_0_Boundary,device_Dist, n, B, r, 0, r + 1);
}
if(r !=0 && round-r-1 !=0){
dim3 nB((round-r-1),r);
// cal3<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, 0, r, round - r - 1);
cal3<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_0_Boundary,device_Dist, n, B, r, r + 1, 0);
}
if(round-r-1 !=0){
dim3 nB_p3(round - r - 1, round - r - 1);
// cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1);
cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_0_Boundary,device_Dist, n, B, r, r + 1, r + 1);
}
cudaSetDevice(1);
if(r != 0){
dim3 nB(r,r);
// cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, 0, r, r);
cal3_1<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_0_Boundary,device_Dist_1, n, B, r, 0, 0);
}
if(r !=0 && (round-r-1) !=0){
dim3 nB(r,(round-r-1));
// cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r + 1, round - r - 1, r);
cal3_1<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_0_Boundary,device_Dist_1, n, B, r, 0, r + 1);
}
if(r !=0 && round-r-1 !=0){
dim3 nB((round-r-1),r);
// cal3<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, 0, r, round - r - 1);
cal3_1<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_0_Boundary, device_Dist_1, n, B, r, r + 1, 0);
}
if(round-r-1 !=0){
dim3 nB_p3(round - r - 1, round - r - 1);
// cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1);
cal3_1<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_0_Boundary,device_Dist_1, n, B, r, r + 1, r + 1);
}
#ifdef DEBUG_DEVICE_DIST
cudaMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After PHASE3, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
#ifdef DEBUG_DEVICE_DIST1
cudaMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After PHASE3, device_Dist_1: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
} // end for(r=0; r<round; r++)
#ifdef TIME
cudaSetDevice(0);
cudaEventRecord(Commstart);
#endif
// Independently copy back to CPU
cudaMemcpyAsync(Dist, device_Dist, n*device_0_Boundary*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpyAsync(Dist+device_0_Boundary*n, device_Dist_1+device_0_Boundary*n, ( n*n - n*device_0_Boundary) *sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#ifdef TIME
float Commtime_D2H;
cudaEventRecord(Commstop);
cudaEventSynchronize(Commstop); // WAIT until 'stop' complete.
cudaEventElapsedTime(&Commtime_D2H, Commstart, Commstop);
printf("D2H copy took %.8f seconds\n",Commtime_D2H/1000);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime_D2H;
printf("Communication %.8f seconds\n",Total_comm_time/1000);
#endif
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop); // WAIT until 'stop' complete.
float time;
cudaEventElapsedTime(&time, start, stop);
// printf("Took %.8f milliseconds",time);
printf("Computation(raw): Took %.8f seconds\n",(time)/1000);
printf("Computation: Took %.8f seconds\n",(time-Total_comm_time)/1000);
#endif
}
/* ================ For Small n ====================== */
/* Define small n cal & cal3 */
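// cal_small / cal3_small: same shared-memory tiling as cal / cal3 but without the
// device_Boundary argument. They are used by block_FW_MultiGPU_Old (the small-n path
// chosen in main() when n < B), and out-of-range threads are masked with plain
// i < n / j < n checks.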
__global__ void cal_small(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
int i = block_start_x*B + threadIdx.y;
int j = block_start_y*B + threadIdx.x;
if(i<n && j<n){
// S[ (i%B)*B + (j%B) ] = device_Dist[i*n + j];
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
// S[Addr(0, (i%B), (j%B), B)] = device_Dist[Addr(0,i,j,n)];
// S[ (i%B)*(B+1) + (j%(B+1)) ] = device_Dist[i*n + j];
// __syncthreads();
// This for-loop CANNOT be parallelized: iteration k depends on the results of iteration k-1.
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
for (int iter = 0; iter<B && Round*B+iter <n; iter++){
__syncthreads();
// if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)]);
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}// end if(i<n && j<n )
}
__global__ void cal3_small(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
__shared__ int S[32*32*3];
// int i = block_start_y* B + blockIdx.y * B + threadIdx.y;
// int j = block_start_x* B + blockIdx.x * B + threadIdx.x;
int i = block_start_x* B + blockIdx.x * B + threadIdx.y;
int j = block_start_y* B + blockIdx.y * B + threadIdx.x;
// S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
// S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i<n && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
if(j<n && (Round*B + threadIdx.y)<n) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
if(i<n && j<n){
// For each thread, calculate one edge.
S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
__syncthreads();
// This for-loop CANNOT be parallelized!
// for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
/// KEY!! Don't USE % on K.
for (int iter = 0; iter<B && Round*B+iter <n; iter++){ //k = Round * B; k < (Round + 1) * B && k < n; ++k) {
// __syncthreads();
// if (S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)] < S[Addr(0, (i%B), (j%B), B)] ) {
// S[Addr(0, (i%B), (j%B), B)] = S[Addr(1, (i%B), (k%B), B)]+ S[Addr(2, (k%B), (j%B), B)];
// }
// i , k // k , j // i , j
// if (S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
// S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)];
// }
S[Addr(0,threadIdx.y, threadIdx.x, B)] = min(S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)], S[Addr(0,threadIdx.y, threadIdx.x, B)] );
}
device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
}
}
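// block_FW_MultiGPU_Old: blocked Floyd-Warshall used for the small-n path (see main()).
// Per round: GPU 0 computes phases 1 and 2, the full matrix is peer-copied to GPU 1,
// phase 3 is split along the row direction (top part on GPU 0, bottom part on GPU 1),
// and GPU 1's rows are peer-copied back to GPU 0 before the next round.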
void block_FW_MultiGPU_Old(int B){
printf("Small n: \n");
printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B);
printf(" %d * %d block\n",B,B);
int round = ceil(n, B);
// int cur_device_number;
// cudaMemcpy();
// int *device_Dist;
int *device_Dist;
int *device_Dist_1;
int canGPU0AccessGPU1, canGPU1AccessGPU0;
#ifdef TIME
float Total_comm_time = 0;
float Commtime;
cudaEvent_t Commstart, Commstop;
cudaEventCreate(&Commstart);
cudaEventCreate(&Commstop);
#endif
#pragma omp parallel num_threads(NUM_THREAD) //reduction(+:pixels)
{
int omp_id, omp_thread_num;
omp_id = omp_get_thread_num();
omp_thread_num = omp_get_num_threads();
if(omp_id==0){
cudaSetDevice(0);
cudaDeviceCanAccessPeer ( &canGPU0AccessGPU1, device_0, device_1 );
if(canGPU0AccessGPU1==1){
printf("Can 0 access 1? %d\n",canGPU0AccessGPU1);
cudaDeviceEnablePeerAccess ( device_1, cudaEnablePeerAccess_Flags );
cudaMalloc(&device_Dist, n * n* sizeof(unsigned int));
#ifdef TIME
cudaEventRecord(Commstart);
#endif
cudaMemcpyAsync(device_Dist, Dist, n* n*sizeof(unsigned int), cudaMemcpyHostToDevice);
printf("omp t%d allocate & copy gpu 0\n",omp_id);
}
else{
printf("Error, gpu 0 cannot directly access gpu 1\n");
// return 2;
}
}
else{
cudaSetDevice(1);
cudaDeviceCanAccessPeer ( &canGPU1AccessGPU0, device_1, device_0 );
if(canGPU1AccessGPU0==1){
printf("Can 1 access 0? %d\n",canGPU1AccessGPU0);
cudaDeviceEnablePeerAccess ( device_0, cudaEnablePeerAccess_Flags );
// cudaGetDevice(&cur_device_number);
cudaMalloc(&device_Dist_1, n * n* sizeof(unsigned int));
// cudaMemcpyAsync(device_Dist_1, Dist, n* n*sizeof(unsigned int), cudaMemcpyHostToDevice);
printf("omp t%d allocate & copy gpu 1\n",omp_id);
}
else{
printf("Error, gpu 1 cannot directly access gpu 0\n");
// return 2;
}
}
}
#ifdef TIME
cudaEventRecord(Commstop);
cudaEventSynchronize(Commstop); // WAIT until 'stop' complete.
cudaEventElapsedTime(&Commtime, Commstart, Commstop);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime;
#endif
#ifdef TIME
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
#endif
// 2*2 threadIdx.x from 0 to 1, Idx.y from 0 to 1
dim3 num_threads(B,B);
for (int r = 0; r < round; ++r) {
#ifdef DEBUG_DIST
printf("========== Round %d ================\n",r);
// print_Dist(n);
#endif
// printf("%d %d\n", r, round);
fflush(stdout);
/* Phase 1*/
// EX: 3*3 Blocks. At iteration k (round r), send D(r,r)
// cal<<< 1, num_threads , sizeof(int)*B*(B+1)>>> (device_Dist, n, B, r, r, r, 1, 1);
cudaSetDevice(0);
cal_small<<< 1, num_threads , sizeof(int)*B*B>>> (device_Dist, n, B, r, r, r);
// cudaDeviceSynchronize();
// // printf("round %d Phase1: (%d, %d), Each row copy: %d entries. \n",r, r, r, min(B, n-r*B));
// for(int i= r*B; i<(r+1)*B && i<n ; i++){
// // printf("Acutal starting location: (%d, %d). MEM[%d]\n",i, r*B, (i*n+r*B));
// cudaMemcpyPeer(device_Dist_1+(i*n+r*B),1, device_Dist+(i*n+r*B),0, min(B, n-r*B)*sizeof(unsigned int));
// }
// cudaDeviceSynchronize();
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After phase1, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
/* Phase2 */
// Width: j direction
// Height: i direction
////////////// WIDTH blocks (height == 1) /////////////////
// GPU 0
// 2-1
if(r !=0){
dim3 nB(1,r);
cal3_small<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, 0);
}
// 2-2
if(round -r-1 !=0){
dim3 nB(1,round - r - 1);
cal3_small<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r, r + 1);
}
//2-3
if(r!=0){
dim3 nB(r,1);
cal3_small<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, 0, r);
}
// 2-4
if(round-r-1 !=0) {
dim3 nB(round - r - 1,1);
cal3_small<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r);
}
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After gpu 0, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
//////////// HEIGHT blocks (width == 1) /////////////
// // GPU 1
// cudaSetDevice(1);
// // 2-3
// if(r!=0){
// dim3 nB(r,1);
// cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist_1, n, B, r, 0, r);
// }
// // 2-4
// if(round-r-1 !=0) {
// dim3 nB(round - r - 1,1);
// cal3<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_Dist_1, n, B, r, r + 1, r);
// }
// // Copy device_Dist_1 to Dist_1 and print out!
// #ifdef DEBUG_DEVICE_DIST1
// cudaDeviceSynchronize();
// cudaMemcpy(Dist_1, device_Dist_1, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
// printf("After gpu 1, device_Dist_1: \n");
// for(int i=0; i<n; i++){
// for(int j=0; j<n; j++){
// if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
// else printf("%d ",Dist_1[i*n+j]);
// }
// }
// #endif
// PHASE 2 COPY From gpu0 to gpu1
#ifdef TIME
cudaEventRecord(Commstart);
#endif
cudaMemcpyPeerAsync(device_Dist_1,1, device_Dist,0, n*n*sizeof(unsigned int));
cudaDeviceSynchronize();
#ifdef TIME
cudaEventRecord(Commstop);
cudaEventSynchronize(Commstop); // WAIT until 'stop' complete.
cudaEventElapsedTime(&Commtime, Commstart, Commstop);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime;
#endif
// Copy device_Dist to Dist_1 and print out!
#ifdef DEBUG_DEVICE_DIST
cudaMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After Copy from gpu 1 to gpu 0, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
#endif
/* Phase 3*/ // => USE 2D block!
// Compute the remaining blocks:
// blocks that overlap the pivot block in neither the x nor the y direction.
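// Naming: _3_<sub-phase>_<device>_*: each of the four phase-3 regions (3-1 .. 3-4) is split
// along i into a top part computed on device 0 and a bottom part computed on device 1;
// *_start_i and *_block_height are measured in blocks, not matrix rows
// (e.g. _3_2_1_start_i is the starting block row of device 1's share of sub-phase 3-2).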
// 3-1:
// From (0,0) do (r, r) Blocks.
int _3_1_block_idx_j = 0;
int _3_1_0_start_i = 0;
int _3_1_0_block_height = ceil(r , 2);
// int 3_4_0_block_width = r;
int _3_1_1_start_i = _3_1_0_start_i + _3_1_0_block_height ;
int _3_1_1_block_height = r - _3_1_0_block_height;
cudaSetDevice(0);
if(r != 0){
dim3 nB(_3_1_0_block_height,r);
cal3_small<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, _3_1_0_start_i, _3_1_block_idx_j);
}
cudaSetDevice(1);
if(r != 0 && _3_1_1_block_height!=0 ){
dim3 nB(_3_1_1_block_height,r);
cal3_small<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist_1, n, B, r, _3_1_1_start_i, _3_1_block_idx_j);
}
// 3-2
// From (0, r+1) do (r * round-r-1 ) BLOCKS
int _3_2_block_idx_j = r+1;
int _3_2_0_start_i = 0; // Note, in Block_Idx, NOT Actual index i in Dist!
int _3_2_0_block_height = ceil(r , 2);
// int 3_2_0_block_width = round-r-1 ;
int _3_2_1_start_i = _3_2_0_start_i + _3_2_0_block_height ;
int _3_2_1_block_height = r - _3_2_0_block_height;
// int _3_2_block_width = (round-r-1 );
cudaSetDevice(0);
if(r !=0 && (round-r-1) !=0 && _3_2_block_idx_j<n ){
dim3 nB(_3_2_0_block_height,(round-r-1));
cal3_small<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, _3_2_0_start_i, _3_2_block_idx_j);
}
cudaSetDevice(1);
if(r !=0 && (round-r-1) !=0 && _3_2_block_idx_j<n && _3_2_1_block_height!=0 ){
dim3 nB(_3_2_1_block_height,(round-r-1));
cal3_small<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist_1, n, B, r, _3_2_1_start_i, _3_2_block_idx_j);
}
// 3-3
// From (r+1, 0) DO (round-r-1) * r Blocks!
int _3_3_block_idx_j = 0;
int _3_3_0_start_i = r+1; // Note, in Block_Idx, NOT Actual index i in Dist!
int _3_3_0_block_height = ceil(round-r-1 , 2);
// int 3_4_0_block_width = r;
int _3_3_1_start_i = _3_3_0_start_i + _3_3_0_block_height ;
int _3_3_1_block_height = (round-r-1) - _3_3_0_block_height;
cudaSetDevice(0);
if(r !=0 && round-r-1 !=0){
dim3 nB(_3_3_0_block_height,r);
cal3_small<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, _3_3_0_start_i, _3_3_block_idx_j );
}
cudaSetDevice(1);
if(r !=0 && round-r-1 !=0 && _3_3_1_block_height!=0 ){
dim3 nB(_3_3_1_block_height,r);
cal3_small<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_Dist_1, n, B, r, _3_3_1_start_i, _3_3_block_idx_j );
}
// 3-4:
// From (r+1, r+1) do (round-r-1) * (round-r-1) blocks
int _3_4_block_idx_j = r+1;
int _3_4_0_start_i = r + 1;
int _3_4_0_block_height = ceil(round-r-1 , 2);
// int 3_4_0_block_width = (round-r-1);
// 3-4-1: from (r+1+ ceil(round-r-1, 2), r+1) Compute {(round-r-1) - ceil(round-r-1 , 2)} * (round-r-1) Blocks.
int _3_4_1_start_i = _3_4_0_start_i + _3_4_0_block_height;
int _3_4_1_block_height = (round-r-1) - _3_4_0_block_height;
// int 3_4_1_block_width = (round-r-1);
// 3-4-0: from (r+1, r+1). Compute ceil(round-r-1 , 2) * (round-r-1) Blocks.
cudaSetDevice(0);
if(round-r-1 !=0 && _3_4_block_idx_j<n ){
dim3 nB_p3(_3_4_0_block_height, round - r - 1);
cal3_small<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, _3_4_0_start_i, _3_4_block_idx_j);
}
cudaSetDevice(1);
if(round-r-1 !=0 && _3_4_block_idx_j<n && _3_4_1_block_height!=0 ){
dim3 nB_p3(_3_4_1_block_height, round - r - 1);
cal3_small<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_Dist_1, n, B, r, _3_4_1_start_i, _3_4_block_idx_j);
}
cudaDeviceSynchronize();
cudaSetDevice(0);
#ifdef TIME
cudaEventRecord(Commstart);
#endif
/* --------- Copy: gpu 1-> gpu0 ----------- */
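// Copy back only the sub-panels that GPU 1 computed this round, one contiguous row segment
// at a time, so device_Dist on GPU 0 is complete before the next round starts.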
// 3-1-1
if(r !=0 && _3_1_1_block_height!=0 ){
#ifdef DEBUG_PHASE3
printf("round %d Phase3-1: GPU 1 copy from (%d, %d), Each row copy: %d entries. \n",r, _3_1_1_start_i, _3_1_block_idx_j, (r-0)*B );
#endif
for(int i= _3_1_1_start_i*B ; i<( _3_1_1_start_i + _3_1_1_block_height) *B && i<n ; i++){ // row-wise copy of the 3-1-1 panel: r blocks (r*B entries) per row, starting at block column 0
#ifdef DEBUG_PHASE3
printf("Actual from (%d, %d), MEM[%d]. \n",i, _3_1_block_idx_j*B, (i*n+ _3_1_block_idx_j*B));
#endif
cudaMemcpyPeerAsync(device_Dist+(i*n+ _3_1_block_idx_j*B),0, device_Dist_1+(i*n+ _3_1_block_idx_j*B),1 , (r-0)*B *sizeof(unsigned int));
}
}
// 3-2-1
if(r !=0 && (round-r-1) !=0 && _3_2_block_idx_j<n && _3_2_1_block_height!=0 ){
#ifdef DEBUG_PHASE3
printf("round %d Phase3-2: GPU 1 copy from (%d, %d), Each row copy: %d entries. \n",r, _3_2_1_start_i, _3_2_block_idx_j, (n-_3_2_block_idx_j*B) );
#endif
for(int i= _3_2_1_start_i*B ; i<( _3_2_1_start_i + _3_2_1_block_height) *B && i<n ; i++){ // row-wise copy of the 3-2-1 panel: from block column r+1 to the end of the row
#ifdef DEBUG_PHASE3
printf("Actual from (%d, %d), MEM[%d]. \n",i, _3_2_block_idx_j*B, i*n+ _3_2_block_idx_j*B );
#endif
cudaMemcpyPeerAsync(device_Dist+(i*n+ _3_2_block_idx_j*B),0, device_Dist_1+(i*n+ _3_2_block_idx_j*B),1 , (n-_3_2_block_idx_j*B) *sizeof(unsigned int));
}
}
// 3-3-1
if(r !=0 && (round-r-1) !=0 && _3_3_1_block_height!=0){
#ifdef DEBUG_PHASE3
printf("round %d Phase3-3: GPU 1 copy from (%d, %d), Each row copy: %d entries. \n",r, _3_3_1_start_i, _3_3_block_idx_j, (r-0)*B );
#endif
for(int i= _3_3_1_start_i*B ; i<( _3_3_1_start_i + _3_3_1_block_height) *B && i<n ; i++){ // row-wise copy of the 3-3-1 panel: r blocks (r*B entries) per row, starting at block column 0
#ifdef DEBUG_PHASE3
printf("Actual from (%d, %d), MEM[%d]. \n",i, _3_3_block_idx_j*B, (i*n+ _3_3_block_idx_j*B));
#endif
cudaMemcpyPeerAsync(device_Dist+(i*n+ _3_3_block_idx_j*B),0, device_Dist_1+(i*n+ _3_3_block_idx_j*B),1 , (r-0)*B *sizeof(unsigned int));
}
}
// 3-4-1
if(round-r-1 !=0 && _3_4_block_idx_j<n && _3_4_1_block_height!=0 ){
#ifdef DEBUG_PHASE3
printf("round %d Phase3-4: GPU 1 copy from (%d, %d), Each row copy: %d entries. \n",r, _3_4_1_start_i, r+1, (n-(r+1)*B ));
#endif
for(int i= _3_4_1_start_i*B; i< (_3_4_1_start_i+ _3_4_1_block_height)*B && i<n ; i++){ // row-wise copy of the 3-4-1 panel: from block column r+1 to the end of the row
#ifdef DEBUG_PHASE3
printf("Actual from (%d, %d), MEM[%d]. \n",i, (r+1)*B, (i*n+ (r+1)*B));
#endif
cudaMemcpyPeerAsync(device_Dist+(i*n+ _3_4_block_idx_j*B),0, device_Dist_1+(i*n+ _3_4_block_idx_j*B),1, (n- _3_4_block_idx_j*B )*sizeof(unsigned int));
}
}
#ifdef DEBUG_DEVICE_DIST
// Copy device_Dist to Dist_1 and print out!
cudaMemcpy(Dist_1, device_Dist, n*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("After phase3 copy from gpu1 to gpu0, device_Dist: \n");
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(j== n-1) printf("%d\n",Dist_1[i*n+j]);
else printf("%d ",Dist_1[i*n+j]);
}
}
printf("\n end round %d \n ===============================\n",r);
#endif
cudaDeviceSynchronize();
#ifdef TIME
cudaEventRecord(Commstop);
cudaEventSynchronize(Commstop); // WAIT until 'stop' complete.
cudaEventElapsedTime(&Commtime, Commstart, Commstop);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime;
#endif
} // end for(r=0; r<round; r++)
#ifdef TIME
cudaEventRecord(stop);
cudaEventSynchronize(stop); // WAIT until 'stop' complete.
float time;
cudaEventElapsedTime(&time, start, stop);
// printf("Took %.8f milliseconds",time);
printf("Computation: Took %.8f seconds\n",(time-Total_comm_time)/1000);
#endif
#ifdef TIME
cudaEventRecord(Commstart);
#endif
cudaMemcpyAsync(Dist, device_Dist, n * n *sizeof(unsigned int), cudaMemcpyDeviceToHost);
#ifdef TIME
cudaEventRecord(Commstop);
cudaEventSynchronize(Commstop); // WAIT until 'stop' complete.
cudaEventElapsedTime(&Commtime, Commstart, Commstop);
// printf("Took %.8f milliseconds",time);
Total_comm_time += Commtime;
printf("Communication %.8f seconds\n",Total_comm_time/1000);
#endif
// cudaDeviceSynchronize(); // TODO : Can remove this
}
|
7f960fd4057b11e72e95b1bd5df1a301a336629e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wb.h"
#define BW 16
#define MAX_VAL 255
#define Max(a, b) ((a) < (b) ? (b) : (a))
#define Min(a, b) ((a) > (b) ? (b) : (a))
#define Clamp(a, start, end) Max(Min(a, end), start)
#define value(arry, i, j, k) arry[((i)*width + (j)) * depth + (k)]
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void stencil(unsigned char *output, unsigned char *input, int width, int height,
int depth)
{
//@@ INSERT CODE HERE
#define output(i, j, k) value(output, i, j, k)
#define input(i, j, k) value(input, i, j, k)
/* Store the thread dimensions on registers*/
int bx=blockIdx.x,by=blockIdx.y,bz=blockIdx.z;
int tx=threadIdx.x,ty=threadIdx.y,tz=threadIdx.z;
/* Declare a shared memory region for each block of threads*/
__shared__ int SharedMemBlock[BW][BW][BW];
/* Find out the row and column for each thread*/
int XIdx=by*blockDim.y+ty;
int YIdx=bx*blockDim.x+tx;
int ZIdx=bz*blockDim.z+tz;
SharedMemBlock[ty][tx][tz]=input(XIdx,YIdx,ZIdx);
__syncthreads();
int Pvalue=0;
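// Accumulate the six axis-aligned neighbours: read from shared memory when the neighbour
// lies inside this thread block, otherwise fall back to a global-memory read.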
if(ZIdx+1>=(bz+1)*BW)
Pvalue+=input(XIdx,YIdx,ZIdx+1);
else
Pvalue+=SharedMemBlock[ty][tx][tz+1];
if(ZIdx-1<bz*BW)
Pvalue+=input(XIdx,YIdx,ZIdx-1);
else
Pvalue+=SharedMemBlock[ty][tx][tz-1];
if(YIdx+1>=(by+1)*BW)
Pvalue+=input(XIdx,YIdx+1,ZIdx);
else
Pvalue+=SharedMemBlock[ty][tx+1][tz];
if(YIdx-1<by*BW)
Pvalue+=input(XIdx,YIdx-1,ZIdx);
else
Pvalue+=SharedMemBlock[ty][tx-1][tz];
if(XIdx+1>=(bx+1)*BW)
Pvalue+=input(XIdx+1,YIdx,ZIdx);
else
Pvalue+=SharedMemBlock[ty+1][tx][tz];
if(XIdx-1<(bx)*BW)
Pvalue+=input(XIdx-1,YIdx,ZIdx);
else
Pvalue+=SharedMemBlock[ty-1][tx][tz];
Pvalue-=6*input(XIdx,YIdx,ZIdx);
output(XIdx,YIdx,ZIdx)=Clamp(Pvalue, 0, MAX_VAL);
#undef output
#undef input
}
static void launch_stencil(unsigned char *deviceOutputData, unsigned char *deviceInputData,
int width, int height, int depth)
{
//@@ INSERT CODE HERE
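// One thread per voxel: BW x BW x BW threads per block, grid rounded up to cover the whole volume.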
dim3 dimBlock(BW,BW,BW);
dim3 dimGrid(ceil((float)width/BW),ceil((float)height/BW),ceil((float)depth/BW));
hipLaunchKernelGGL(( stencil), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceOutputData,deviceInputData,width,height,depth);
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int width;
int height;
int depth;
char *inputFile;
wbImage_t input;
wbImage_t output;
unsigned char *hostInputData;
unsigned char *hostOutputData;
unsigned char *deviceInputData;
unsigned char *deviceOutputData;
arg = wbArg_read(argc, argv);
inputFile = wbArg_getInputFile(arg, 0);
input = wbImport(inputFile);
width = wbImage_getWidth(input);
height = wbImage_getHeight(input);
depth = wbImage_getChannels(input);
output = wbImage_new(width, height, depth);
hostInputData = wbImage_getData(input);
hostOutputData = wbImage_getData(output);
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputData,
width * height * depth * sizeof(unsigned char));
hipMalloc((void **)&deviceOutputData,
width * height * depth * sizeof(unsigned char));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputData, hostInputData,
width * height * depth * sizeof(unsigned char),
hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
launch_stencil(deviceOutputData, deviceInputData, width, height, depth);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputData, deviceOutputData,
width * height * depth * sizeof(unsigned char),
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbSolution(arg, output);
hipFree(deviceInputData);
hipFree(deviceOutputData);
wbImage_delete(output);
wbImage_delete(input);
return 0;
}
|
7f960fd4057b11e72e95b1bd5df1a301a336629e.cu
|
#include "wb.h"
#define BW 16
#define MAX_VAL 255
#define Max(a, b) ((a) < (b) ? (b) : (a))
#define Min(a, b) ((a) > (b) ? (b) : (a))
#define Clamp(a, start, end) Max(Min(a, end), start)
#define value(arry, i, j, k) arry[((i)*width + (j)) * depth + (k)]
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void stencil(unsigned char *output, unsigned char *input, int width, int height,
int depth)
{
//@@ INSERT CODE HERE
#define output(i, j, k) value(output, i, j, k)
#define input(i, j, k) value(input, i, j, k)
/* Store the thread dimensions on registers*/
int bx=blockIdx.x,by=blockIdx.y,bz=blockIdx.z;
int tx=threadIdx.x,ty=threadIdx.y,tz=threadIdx.z;
/* Declare a shared memory region for each block of threads*/
__shared__ int SharedMemBlock[BW][BW][BW];
/* Find out the row and column for each thread*/
int XIdx=by*blockDim.y+ty;
int YIdx=bx*blockDim.x+tx;
int ZIdx=bz*blockDim.z+tz;
SharedMemBlock[ty][tx][tz]=input(XIdx,YIdx,ZIdx);
__syncthreads();
int Pvalue=0;
if(ZIdx+1>=(bz+1)*BW)
Pvalue+=input(XIdx,YIdx,ZIdx+1);
else
Pvalue+=SharedMemBlock[ty][tx][tz+1];
if(ZIdx-1<bz*BW)
Pvalue+=input(XIdx,YIdx,ZIdx-1);
else
Pvalue+=SharedMemBlock[ty][tx][tz-1];
if(YIdx+1>=(by+1)*BW)
Pvalue+=input(XIdx,YIdx+1,ZIdx);
else
Pvalue+=SharedMemBlock[ty][tx+1][tz];
if(YIdx-1<by*BW)
Pvalue+=input(XIdx,YIdx-1,ZIdx);
else
Pvalue+=SharedMemBlock[ty][tx-1][tz];
if(XIdx+1>=(bx+1)*BW)
Pvalue+=input(XIdx+1,YIdx,ZIdx);
else
Pvalue+=SharedMemBlock[ty+1][tx][tz];
if(XIdx-1<(bx)*BW)
Pvalue+=input(XIdx-1,YIdx,ZIdx);
else
Pvalue+=SharedMemBlock[ty-1][tx][tz];
Pvalue-=6*input(XIdx,YIdx,ZIdx);
output(XIdx,YIdx,ZIdx)=Clamp(Pvalue, 0, MAX_VAL);
#undef output
#undef input
}
static void launch_stencil(unsigned char *deviceOutputData, unsigned char *deviceInputData,
int width, int height, int depth)
{
//@@ INSERT CODE HERE
dim3 dimBlock(BW,BW,BW);
dim3 dimGrid(ceil((float)width/BW),ceil((float)height/BW),ceil((float)depth/BW));
stencil<<<dimGrid,dimBlock>>>(deviceOutputData,deviceInputData,width,height,depth);
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int width;
int height;
int depth;
char *inputFile;
wbImage_t input;
wbImage_t output;
unsigned char *hostInputData;
unsigned char *hostOutputData;
unsigned char *deviceInputData;
unsigned char *deviceOutputData;
arg = wbArg_read(argc, argv);
inputFile = wbArg_getInputFile(arg, 0);
input = wbImport(inputFile);
width = wbImage_getWidth(input);
height = wbImage_getHeight(input);
depth = wbImage_getChannels(input);
output = wbImage_new(width, height, depth);
hostInputData = wbImage_getData(input);
hostOutputData = wbImage_getData(output);
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputData,
width * height * depth * sizeof(unsigned char));
cudaMalloc((void **)&deviceOutputData,
width * height * depth * sizeof(unsigned char));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputData, hostInputData,
width * height * depth * sizeof(unsigned char),
cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
launch_stencil(deviceOutputData, deviceInputData, width, height, depth);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputData, deviceOutputData,
width * height * depth * sizeof(unsigned char),
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbSolution(arg, output);
cudaFree(deviceInputData);
cudaFree(deviceOutputData);
wbImage_delete(output);
wbImage_delete(input);
return 0;
}
|
92cc4d91bba8c46e174e93a9754617c83cc4d28b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <helper_cuda.h>
#include "../utils/Logger.h"
bool initCuda(int argc, char **argv)
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return false;
}
int cuda_device = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProps;
checkCudaErrors(hipGetDeviceProperties(&deviceProps, cuda_device));
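// CUDA Dynamic Parallelism requires compute capability 3.5 or higher.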
int cdpCapable = (deviceProps.major == 3 && deviceProps.minor >= 5) || deviceProps.major >=4;
printf("GPU: %s (SM %d.%d)\n", deviceProps.name, deviceProps.major, deviceProps.minor);
if (!cdpCapable)
{
ErrorLogger::getInstance() >> "RandomWalk requires SM 3.5 or higher for CUDA Dynamic Parallelism.\n";
return false;
}
return true;
}
|
92cc4d91bba8c46e174e93a9754617c83cc4d28b.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <helper_cuda.h>
#include "../utils/Logger.h"
bool initCuda(int argc, char **argv)
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return false;
}
int cuda_device = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProps;
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, cuda_device));
int cdpCapable = (deviceProps.major == 3 && deviceProps.minor >= 5) || deviceProps.major >=4;
printf("GPU: %s (SM %d.%d)\n", deviceProps.name, deviceProps.major, deviceProps.minor);
if (!cdpCapable)
{
ErrorLogger::getInstance() >> "RandomWalk requires SM 3.5 or higher for CUDA Dynamic Parallelism.\n";
return false;
}
return true;
}
|
dc1fb4161adb3bfce58dedb9a29e68e1e92daccf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/gpu_001_lidar_mapping.cuh"
__device__ inline float calcLaserAngle(int laser_rays, float angle_min, float angle_max, int tid)
{
return ((float)tid+0.5)/laser_rays*(angle_max - angle_min) + angle_min;
}
__device__ inline Point3F32 dkLidarToScan(const HTMatrixLidarCPU* dk_cpu, float th5, float a5)
{
Point3F32 point;
// A_GPU_14
point.x = a5*dk_cpu->m_0*cos(th5) + a5*dk_cpu->m_1*sin(th5) + dk_cpu->m_3;
// A_GPU_24
point.y = a5*dk_cpu->m_4*cos(th5) + a5*dk_cpu->m_5*sin(th5) + dk_cpu->m_7;
// A_GPU_34
point.z = a5*dk_cpu->m_8*cos(th5) + a5*dk_cpu->m_9*sin(th5) + dk_cpu->m_11;
return point;
}
__device__ inline Point2I32 mapRealToGPU(float point_x, float point_y, float map_orient, float map_scale, float map_offset_pix)
{
Point2I32 map_pose;
float point_orient = atan2f(point_y, point_x);
float point_dist = sqrtf(point_x*point_x + point_y*point_y);
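// Rotate the point into the map frame (using its polar form) and convert to pixel coordinates via the map scale and offset.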
map_pose.x = (int) (sinf(map_orient - point_orient) * point_dist * map_scale + map_offset_pix);
map_pose.y = (int) (cosf(map_orient - point_orient) * point_dist * map_scale + map_offset_pix);
return map_pose;
}
__global__ void lidarMappingKernel(
float* laser_scan,
const HTMatrixLidarCPU dk_cpu,
const int laser_rays,
const float angle_min,
const float angle_max,
int16_t* heightmap,
const int map_x,
const int map_y,
const int height_scale,
const int map_scale,
const float map_orient,
const float map_offset_pix,
float* debug)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// DISTANCE MEASURED BY LASER SCAN
float a5 = laser_scan[tid];
// ANGLE FROM MIDDLE OF SCANNING AREA
float th5 = calcLaserAngle(laser_rays, angle_min, angle_max, tid);
// GLOBAL POSITION OF POINT ON THE END OF THE SCAN
Point3F32 point_world = dkLidarToScan(&dk_cpu, th5, a5);
// POSITION OF SCAN POINT ON GPU HEIGHTMAP
Point2I32 point_map = mapRealToGPU(point_world.x, point_world.y, map_orient, map_scale, map_offset_pix);
// CHECKING IF SCAN POINT IS INSIDE GPU MAP
if(point_map.x >=0 && point_map.x < map_x && point_map.y >=0 && point_map.y < map_y)
{
// ASSIGN NEW POINT TO GPU MAP
heightmap[point_map.x * map_y + point_map.y] = (int16_t) (point_world.z * height_scale);
}
}
GpuLidarMapping::GpuLidarMapping(_RobotPlannerMaps *_rpm, _ROSBuffor *_ros)
{
this->_rpm = _rpm;
this->_ros = _ros;
}
void GpuLidarMapping::allocateMemory(int laser_rays, float angle_min, float angle_max)
{
this->laser_rays = laser_rays;
this->angle_min = angle_min;
this->angle_max = angle_max;
gpuErrchk(hipMalloc((void**)&dev_laser_scan, laser_rays * sizeof(float)) );
gpuErrchk(hipMalloc((void**)&dev_dk_matrix, 16 * sizeof(double)) );
}
void GpuLidarMapping::freeMemory()
{
gpuErrchk( hipFree(dev_dk_matrix) );
gpuErrchk( hipFree(dev_laser_scan) );
}
void GpuLidarMapping::drawInitialHeightmapCircle()
{
_rpm->dev_heightmap.drawCircle(init_circle_height, _rpm->map_offset_pix, _rpm->map_offset_pix, init_circle_radius); // -40 - height from rover center to bottom of its wheels
}
void GpuLidarMapping::copyInputToDevice()
{
// Copying laser scan to GPU
gpuErrchk( hipMemcpy(this->dev_laser_scan, &_ros->laser_scan.ranges[0], this->laser_rays * sizeof(float), hipMemcpyHostToDevice) );
}
void GpuLidarMapping::executeKernel()
{
// CPU part
this->dk_cpu = dkWorldToLidarReduced(
_ros->odom.pose.pose.position.x,
_ros->odom.pose.pose.position.y,
_ros->odom.pose.pose.position.z,
_ros->odom.pose.pose.orientation.x,
_ros->odom.pose.pose.orientation.y,
_ros->odom.pose.pose.orientation.z,
_ros->odom.pose.pose.orientation.w,
_ros->lidar_pose.data,
this->dk_a1,
this->dk_d2,
this->dk_al3);
// GPU part
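// One block per laser ray, one thread per block (grid = laser_rays, block size = 1).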
hipLaunchKernelGGL(( lidarMappingKernel) , dim3(this->laser_rays), dim3(1) , 0, 0,
this->dev_laser_scan,
this->dk_cpu,
this->laser_rays,
this->angle_min,
this->angle_max,
_rpm->dev_heightmap.data,
_rpm->dev_heightmap.size_x,
_rpm->dev_heightmap.size_y,
_rpm->height_scale,
_rpm->map_scale,
_rpm->map_orient,
_rpm->map_offset_pix,
_rpm->dev_debug);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
}
void GpuLidarMapping::copyOutputToHost()
{
gpuErrchk( hipMemcpy(_rpm->host_heightmap.data, _rpm->dev_heightmap.data, _rpm->dev_heightmap.size() * sizeof(int16_t), hipMemcpyDeviceToHost) );
}
void GpuLidarMapping::display()
{
_rpm->host_heightmap.display("heightmap");
}
HTMatrix dkWorldToLidar(
double tx,
double ty,
double tz,
double qx,
double qy,
double qz,
double qw,
double th2,
const double a1,
const double d2,
const double al3
)
{
OctaveVariable <double> TX(tx);
OctaveVariable <double> TY(ty);
OctaveVariable <double> TZ(tz);
OctaveVariable <double> QX(qx);
OctaveVariable <double> QY(qy);
OctaveVariable <double> QZ(qz);
OctaveVariable <double> QW(qw);
OctaveVariable <double> TH2(th2);
OctaveVariable <double> A1(a1);
OctaveVariable <double> D2(d2);
OctaveVariable <double> AL3(al3);
OctaveVariable <double> TMP;
HTMatrix dk_cpu;
// A_CPU_11
TMP = -(2*QW*QY + 2*QX*QZ)*sin(AL3) + (-2*QW*QZ + 2*QX*QY)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QY->*2 - 2*QZ->*2 + 1)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900);
dk_cpu.m[0][0] = TMP.data;
// A_CPU_12
TMP = (2*QW*QY + 2*QX*QZ)*sin(AL3)/16331239353195370 + (-2*QW*QZ + 2*QX*QY)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2)) + (-2*QY->*2 - 2*QZ->*2 + 1)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370);
dk_cpu.m[0][1] = TMP.data;
// A_CPU_13
TMP = (2*QW*QY + 2*QX*QZ)*cos(AL3) + (-2*QW*QZ + 2*QX*QY)*(sin(TH2)*sin(AL3) - sin(AL3)*cos(TH2)/16331239353195370) + (sin(TH2)*sin(AL3)/16331239353195370 + sin(AL3)*cos(TH2))*(-2*QY->*2 - 2*QZ->*2 + 1);
dk_cpu.m[0][2] = TMP.data;
// A_CPU_14
TMP = TX + A1*(-2*QY->*2 - 2*QZ->*2 + 1) + D2*(2*QW*QY + 2*QX*QZ);
dk_cpu.m[0][3] = TMP.data;
// A_CPU_21
TMP = -(-2*QW*QX + 2*QY*QZ)*sin(AL3) + (2*QW*QZ + 2*QX*QY)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900) + (-2*QX->*2 - 2*QZ->*2 + 1)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370);
dk_cpu.m[1][0] = TMP.data;
// A_CPU_22
TMP = (-2*QW*QX + 2*QY*QZ)*sin(AL3)/16331239353195370 + (2*QW*QZ + 2*QX*QY)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QX->*2 - 2*QZ->*2 + 1)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2));
dk_cpu.m[1][1] = TMP.data;
// A_CPU_23
TMP = (-2*QW*QX + 2*QY*QZ)*cos(AL3) + (2*QW*QZ + 2*QX*QY)*(sin(TH2)*sin(AL3)/16331239353195370 + sin(AL3)*cos(TH2)) + (sin(TH2)*sin(AL3) - sin(AL3)*cos(TH2)/16331239353195370)*(-2*QX->*2 - 2*QZ->*2 + 1);
dk_cpu.m[1][2] = TMP.data;
// A_CPU_24
TMP = TY + A1*(2*QW*QZ + 2*QX*QY) + D2*(-2*QW*QX + 2*QY*QZ);
dk_cpu.m[1][3] = TMP.data;
// A_CPU_31
TMP = (2*QW*QX + 2*QY*QZ)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QW*QY + 2*QX*QZ)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900) - (-2*QX->*2 - 2*QY->*2 + 1)*sin(AL3);
dk_cpu.m[2][0] = TMP.data;
// A_CPU_32
TMP = (2*QW*QX + 2*QY*QZ)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2)) + (-2*QW*QY + 2*QX*QZ)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QX->*2 - 2*QY->*2 + 1)*sin(AL3)/16331239353195370;
dk_cpu.m[2][1] = TMP.data;
// A_CPU_33
TMP = (2*QW*QX + 2*QY*QZ)*(sin(TH2)*sin(AL3) - sin(AL3)*cos(TH2)/16331239353195370) + (-2*QW*QY + 2*QX*QZ)*(sin(TH2)*sin(AL3)/16331239353195370 + sin(AL3)*cos(TH2)) + (-2*QX->*2 - 2*QY->*2 + 1)*cos(AL3);
dk_cpu.m[2][2] = TMP.data;
// A_CPU_34
TMP = TZ + A1*(-2*QW*QY + 2*QX*QZ) + D2*(-2*QX->*2 - 2*QY->*2 + 1);
dk_cpu.m[2][3] = TMP.data;
// A_CPU_41
dk_cpu.m[3][0] = 0;
// A_CPU_42
dk_cpu.m[3][1] = 0;
// A_CPU_43
dk_cpu.m[3][2] = 0;
// A_CPU_44
dk_cpu.m[3][3] = 1;
return dk_cpu;
}
HTMatrixLidarCPU dkWorldToLidarReduced(
double tx,
double ty,
double tz,
double qx,
double qy,
double qz,
double qw,
double th2,
const double a1,
const double d2,
const double al3
)
{
OctaveVariable <double> TX(tx);
OctaveVariable <double> TY(ty);
OctaveVariable <double> TZ(tz);
OctaveVariable <double> QX(qx);
OctaveVariable <double> QY(qy);
OctaveVariable <double> QZ(qz);
OctaveVariable <double> QW(qw);
OctaveVariable <double> TH2(th2);
OctaveVariable <double> A1(a1);
OctaveVariable <double> D2(d2);
OctaveVariable <double> AL3(al3);
OctaveVariable <double> TMP;
HTMatrixLidarCPU dk_cpu;
// A_CPU_11
TMP = -(2*QW*QY + 2*QX*QZ)*sin(AL3) + (-2*QW*QZ + 2*QX*QY)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QY->*2 - 2*QZ->*2 + 1)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900);
dk_cpu.m_0 = TMP.data;
// A_CPU_12
TMP = (2*QW*QY + 2*QX*QZ)*sin(AL3)/16331239353195370 + (-2*QW*QZ + 2*QX*QY)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2)) + (-2*QY->*2 - 2*QZ->*2 + 1)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370);
dk_cpu.m_1 = TMP.data;
// A_CPU_14
TMP = TX + A1*(-2*QY->*2 - 2*QZ->*2 + 1) + D2*(2*QW*QY + 2*QX*QZ);
dk_cpu.m_3 = TMP.data;
// A_CPU_21
TMP = -(-2*QW*QX + 2*QY*QZ)*sin(AL3) + (2*QW*QZ + 2*QX*QY)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900) + (-2*QX->*2 - 2*QZ->*2 + 1)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370);
dk_cpu.m_4 = TMP.data;
// A_CPU_22
TMP = (-2*QW*QX + 2*QY*QZ)*sin(AL3)/16331239353195370 + (2*QW*QZ + 2*QX*QY)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QX->*2 - 2*QZ->*2 + 1)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2));
dk_cpu.m_5 = TMP.data;
// A_CPU_24
TMP = TY + A1*(2*QW*QZ + 2*QX*QY) + D2*(-2*QW*QX + 2*QY*QZ);
dk_cpu.m_7 = TMP.data;
// A_CPU_31
TMP = (2*QW*QX + 2*QY*QZ)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QW*QY + 2*QX*QZ)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900) - (-2*QX->*2 - 2*QY->*2 + 1)*sin(AL3);
dk_cpu.m_8 = TMP.data;
// A_CPU_32
TMP = (2*QW*QX + 2*QY*QZ)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2)) + (-2*QW*QY + 2*QX*QZ)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QX->*2 - 2*QY->*2 + 1)*sin(AL3)/16331239353195370;
dk_cpu.m_9 = TMP.data;
// A_CPU_34
TMP = TZ + A1*(-2*QW*QY + 2*QX*QZ) + D2*(-2*QX->*2 - 2*QY->*2 + 1);
dk_cpu.m_11 = TMP.data;
return dk_cpu;
}
|
dc1fb4161adb3bfce58dedb9a29e68e1e92daccf.cu
|
#include "../include/gpu_001_lidar_mapping.cuh"
__device__ inline float calcLaserAngle(int laser_rays, float angle_min, float angle_max, int tid)
{
return ((float)tid+0.5)/laser_rays*(angle_max - angle_min) + angle_min;
}
__device__ inline Point3F32 dkLidarToScan(const HTMatrixLidarCPU* dk_cpu, float th5, float a5)
{
Point3F32 point;
// A_GPU_14
point.x = a5*dk_cpu->m_0*cos(th5) + a5*dk_cpu->m_1*sin(th5) + dk_cpu->m_3;
// A_GPU_24
point.y = a5*dk_cpu->m_4*cos(th5) + a5*dk_cpu->m_5*sin(th5) + dk_cpu->m_7;
// A_GPU_34
point.z = a5*dk_cpu->m_8*cos(th5) + a5*dk_cpu->m_9*sin(th5) + dk_cpu->m_11;
return point;
}
__device__ inline Point2I32 mapRealToGPU(float point_x, float point_y, float map_orient, float map_scale, float map_offset_pix)
{
Point2I32 map_pose;
float point_orient = atan2f(point_y, point_x);
float point_dist = sqrtf(point_x*point_x + point_y*point_y);
map_pose.x = (int) (sinf(map_orient - point_orient) * point_dist * map_scale + map_offset_pix);
map_pose.y = (int) (cosf(map_orient - point_orient) * point_dist * map_scale + map_offset_pix);
return map_pose;
}
__global__ void lidarMappingKernel(
float* laser_scan,
const HTMatrixLidarCPU dk_cpu,
const int laser_rays,
const float angle_min,
const float angle_max,
int16_t* heightmap,
const int map_x,
const int map_y,
const int height_scale,
const int map_scale,
const float map_orient,
const float map_offset_pix,
float* debug)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// DISTANCE MEASURED BY LASER SCAN
float a5 = laser_scan[tid];
// ANGLE FROM MIDDLE OF SCANNING AREA
float th5 = calcLaserAngle(laser_rays, angle_min, angle_max, tid);
// GLOBAL POSITION OF POINT ON THE END OF THE SCAN
Point3F32 point_world = dkLidarToScan(&dk_cpu, th5, a5);
// POSITION OF SCAN POINT ON GPU HEIGHTMAP
Point2I32 point_map = mapRealToGPU(point_world.x, point_world.y, map_orient, map_scale, map_offset_pix);
// CHECKING IF SCAN POINT IS INSIDE GPU MAP
if(point_map.x >=0 && point_map.x < map_x && point_map.y >=0 && point_map.y < map_y)
{
// ASSIGN NEW POINT TO GPU MAP
heightmap[point_map.x * map_y + point_map.y] = (int16_t) (point_world.z * height_scale);
}
}
GpuLidarMapping::GpuLidarMapping(_RobotPlannerMaps *_rpm, _ROSBuffor *_ros)
{
this->_rpm = _rpm;
this->_ros = _ros;
}
void GpuLidarMapping::allocateMemory(int laser_rays, float angle_min, float angle_max)
{
this->laser_rays = laser_rays;
this->angle_min = angle_min;
this->angle_max = angle_max;
gpuErrchk(cudaMalloc((void**)&dev_laser_scan, laser_rays * sizeof(float)) );
gpuErrchk(cudaMalloc((void**)&dev_dk_matrix, 16 * sizeof(double)) );
}
void GpuLidarMapping::freeMemory()
{
gpuErrchk( cudaFree(dev_dk_matrix) );
gpuErrchk( cudaFree(dev_laser_scan) );
}
void GpuLidarMapping::drawInitialHeightmapCircle()
{
_rpm->dev_heightmap.drawCircle(init_circle_height, _rpm->map_offset_pix, _rpm->map_offset_pix, init_circle_radius); // -40 - height from rover center to bottom of its wheels
}
void GpuLidarMapping::copyInputToDevice()
{
// Copying laser scan to GPU
gpuErrchk( cudaMemcpy(this->dev_laser_scan, &_ros->laser_scan.ranges[0], this->laser_rays * sizeof(float), cudaMemcpyHostToDevice) );
}
void GpuLidarMapping::executeKernel()
{
// CPU part
this->dk_cpu = dkWorldToLidarReduced(
_ros->odom.pose.pose.position.x,
_ros->odom.pose.pose.position.y,
_ros->odom.pose.pose.position.z,
_ros->odom.pose.pose.orientation.x,
_ros->odom.pose.pose.orientation.y,
_ros->odom.pose.pose.orientation.z,
_ros->odom.pose.pose.orientation.w,
_ros->lidar_pose.data,
this->dk_a1,
this->dk_d2,
this->dk_al3);
// GPU part
lidarMappingKernel <<< this->laser_rays, 1 >>> (
this->dev_laser_scan,
this->dk_cpu,
this->laser_rays,
this->angle_min,
this->angle_max,
_rpm->dev_heightmap.data,
_rpm->dev_heightmap.size_x,
_rpm->dev_heightmap.size_y,
_rpm->height_scale,
_rpm->map_scale,
_rpm->map_orient,
_rpm->map_offset_pix,
_rpm->dev_debug);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
void GpuLidarMapping::copyOutputToHost()
{
gpuErrchk( cudaMemcpy(_rpm->host_heightmap.data, _rpm->dev_heightmap.data, _rpm->dev_heightmap.size() * sizeof(int16_t), cudaMemcpyDeviceToHost) );
}
void GpuLidarMapping::display()
{
_rpm->host_heightmap.display("heightmap");
}
HTMatrix dkWorldToLidar(
double tx,
double ty,
double tz,
double qx,
double qy,
double qz,
double qw,
double th2,
const double a1,
const double d2,
const double al3
)
{
OctaveVariable <double> TX(tx);
OctaveVariable <double> TY(ty);
OctaveVariable <double> TZ(tz);
OctaveVariable <double> QX(qx);
OctaveVariable <double> QY(qy);
OctaveVariable <double> QZ(qz);
OctaveVariable <double> QW(qw);
OctaveVariable <double> TH2(th2);
OctaveVariable <double> A1(a1);
OctaveVariable <double> D2(d2);
OctaveVariable <double> AL3(al3);
OctaveVariable <double> TMP;
HTMatrix dk_cpu;
// A_CPU_11
TMP = -(2*QW*QY + 2*QX*QZ)*sin(AL3) + (-2*QW*QZ + 2*QX*QY)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QY->*2 - 2*QZ->*2 + 1)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900);
dk_cpu.m[0][0] = TMP.data;
// A_CPU_12
TMP = (2*QW*QY + 2*QX*QZ)*sin(AL3)/16331239353195370 + (-2*QW*QZ + 2*QX*QY)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2)) + (-2*QY->*2 - 2*QZ->*2 + 1)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370);
dk_cpu.m[0][1] = TMP.data;
// A_CPU_13
TMP = (2*QW*QY + 2*QX*QZ)*cos(AL3) + (-2*QW*QZ + 2*QX*QY)*(sin(TH2)*sin(AL3) - sin(AL3)*cos(TH2)/16331239353195370) + (sin(TH2)*sin(AL3)/16331239353195370 + sin(AL3)*cos(TH2))*(-2*QY->*2 - 2*QZ->*2 + 1);
dk_cpu.m[0][2] = TMP.data;
// A_CPU_14
TMP = TX + A1*(-2*QY->*2 - 2*QZ->*2 + 1) + D2*(2*QW*QY + 2*QX*QZ);
dk_cpu.m[0][3] = TMP.data;
// A_CPU_21
TMP = -(-2*QW*QX + 2*QY*QZ)*sin(AL3) + (2*QW*QZ + 2*QX*QY)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900) + (-2*QX->*2 - 2*QZ->*2 + 1)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370);
dk_cpu.m[1][0] = TMP.data;
// A_CPU_22
TMP = (-2*QW*QX + 2*QY*QZ)*sin(AL3)/16331239353195370 + (2*QW*QZ + 2*QX*QY)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QX->*2 - 2*QZ->*2 + 1)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2));
dk_cpu.m[1][1] = TMP.data;
// A_CPU_23
TMP = (-2*QW*QX + 2*QY*QZ)*cos(AL3) + (2*QW*QZ + 2*QX*QY)*(sin(TH2)*sin(AL3)/16331239353195370 + sin(AL3)*cos(TH2)) + (sin(TH2)*sin(AL3) - sin(AL3)*cos(TH2)/16331239353195370)*(-2*QX->*2 - 2*QZ->*2 + 1);
dk_cpu.m[1][2] = TMP.data;
// A_CPU_24
TMP = TY + A1*(2*QW*QZ + 2*QX*QY) + D2*(-2*QW*QX + 2*QY*QZ);
dk_cpu.m[1][3] = TMP.data;
// A_CPU_31
TMP = (2*QW*QX + 2*QY*QZ)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QW*QY + 2*QX*QZ)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900) - (-2*QX->*2 - 2*QY->*2 + 1)*sin(AL3);
dk_cpu.m[2][0] = TMP.data;
// A_CPU_32
TMP = (2*QW*QX + 2*QY*QZ)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2)) + (-2*QW*QY + 2*QX*QZ)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QX->*2 - 2*QY->*2 + 1)*sin(AL3)/16331239353195370;
dk_cpu.m[2][1] = TMP.data;
// A_CPU_33
TMP = (2*QW*QX + 2*QY*QZ)*(sin(TH2)*sin(AL3) - sin(AL3)*cos(TH2)/16331239353195370) + (-2*QW*QY + 2*QX*QZ)*(sin(TH2)*sin(AL3)/16331239353195370 + sin(AL3)*cos(TH2)) + (-2*QX->*2 - 2*QY->*2 + 1)*cos(AL3);
dk_cpu.m[2][2] = TMP.data;
// A_CPU_34
TMP = TZ + A1*(-2*QW*QY + 2*QX*QZ) + D2*(-2*QX->*2 - 2*QY->*2 + 1);
dk_cpu.m[2][3] = TMP.data;
// A_CPU_41
dk_cpu.m[3][0] = 0;
// A_CPU_42
dk_cpu.m[3][1] = 0;
// A_CPU_43
dk_cpu.m[3][2] = 0;
// A_CPU_44
dk_cpu.m[3][3] = 1;
return dk_cpu;
}
HTMatrixLidarCPU dkWorldToLidarReduced(
double tx,
double ty,
double tz,
double qx,
double qy,
double qz,
double qw,
double th2,
const double a1,
const double d2,
const double al3
)
{
OctaveVariable <double> TX(tx);
OctaveVariable <double> TY(ty);
OctaveVariable <double> TZ(tz);
OctaveVariable <double> QX(qx);
OctaveVariable <double> QY(qy);
OctaveVariable <double> QZ(qz);
OctaveVariable <double> QW(qw);
OctaveVariable <double> TH2(th2);
OctaveVariable <double> A1(a1);
OctaveVariable <double> D2(d2);
OctaveVariable <double> AL3(al3);
OctaveVariable <double> TMP;
HTMatrixLidarCPU dk_cpu;
// A_CPU_11
TMP = -(2*QW*QY + 2*QX*QZ)*sin(AL3) + (-2*QW*QZ + 2*QX*QY)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QY->*2 - 2*QZ->*2 + 1)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900);
dk_cpu.m_0 = TMP.data;
// A_CPU_12
TMP = (2*QW*QY + 2*QX*QZ)*sin(AL3)/16331239353195370 + (-2*QW*QZ + 2*QX*QY)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2)) + (-2*QY->*2 - 2*QZ->*2 + 1)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370);
dk_cpu.m_1 = TMP.data;
// A_CPU_14
TMP = TX + A1*(-2*QY->*2 - 2*QZ->*2 + 1) + D2*(2*QW*QY + 2*QX*QZ);
dk_cpu.m_3 = TMP.data;
// A_CPU_21
TMP = -(-2*QW*QX + 2*QY*QZ)*sin(AL3) + (2*QW*QZ + 2*QX*QY)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900) + (-2*QX->*2 - 2*QZ->*2 + 1)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370);
dk_cpu.m_4 = TMP.data;
// A_CPU_22
TMP = (-2*QW*QX + 2*QY*QZ)*sin(AL3)/16331239353195370 + (2*QW*QZ + 2*QX*QY)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QX->*2 - 2*QZ->*2 + 1)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2));
dk_cpu.m_5 = TMP.data;
// A_CPU_24
TMP = TY + A1*(2*QW*QZ + 2*QX*QY) + D2*(-2*QW*QX + 2*QY*QZ);
dk_cpu.m_7 = TMP.data;
// A_CPU_31
TMP = (2*QW*QX + 2*QY*QZ)*(sin(TH2)*cos(AL3) + sin(TH2)/266709378811357127073829389436900 - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QW*QY + 2*QX*QZ)*(sin(TH2)*cos(AL3)/16331239353195370 - sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3) + cos(TH2)/266709378811357127073829389436900) - (-2*QX->*2 - 2*QY->*2 + 1)*sin(AL3);
dk_cpu.m_8 = TMP.data;
// A_CPU_32
TMP = (2*QW*QX + 2*QY*QZ)*(-sin(TH2)*cos(AL3)/16331239353195370 + sin(TH2)/16331239353195370 + cos(TH2)*cos(AL3)/266709378811357127073829389436900 + cos(TH2)) + (-2*QW*QY + 2*QX*QZ)*(-sin(TH2)*cos(AL3)/266709378811357127073829389436900 - sin(TH2) - cos(TH2)*cos(AL3)/16331239353195370 + cos(TH2)/16331239353195370) + (-2*QX->*2 - 2*QY->*2 + 1)*sin(AL3)/16331239353195370;
dk_cpu.m_9 = TMP.data;
// A_CPU_34
TMP = TZ + A1*(-2*QW*QY + 2*QX*QZ) + D2*(-2*QX->*2 - 2*QY->*2 + 1);
dk_cpu.m_11 = TMP.data;
return dk_cpu;
}
|
79ade97bd07ede7f4b998f804c30e88d50da5bc9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompareT.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathCompareT.cu"
#include <THH/THHGenerateShortType.h>
|
79ade97bd07ede7f4b998f804c30e88d50da5bc9.cu
|
#include "../THCTensorMathCompareT.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathCompareT.cu"
#include <THC/THCGenerateShortType.h>
|
eaebdd3fa471448c849f985fd07d736775696eac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "src/cuda/elemwise_helper.cuh"
#include "src/cuda/query_blocksize.cuh"
#include "src/cuda/relayout/kern_contiguous.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
// dst is contiguous
void copy_last_contiguous(
const TensorND& dst, const TensorND& src, size_t contiguous_size,
hipStream_t stream) {
ElemwiseOpParamN<2> param;
param[0] = dst;
param[1] = src;
#define RUN(_dt) \
do { \
typedef DTypeTrait<dtype::_dt>::ctype ctype; \
param[0].layout.dtype = param[1].layout.dtype = dtype::_dt(); \
param.init_from_given_tensor(); \
param.assert_initialized(); \
contiguous_intl::UserOpInvoker<ctype, 2>(param, stream, contiguous_size); \
return; \
} while (0)
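// Each RUN() case returns from the function, so no break statements are needed after the cases below.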
switch (dst.layout.dtype.size()) {
case 1:
RUN(Byte);
case 2:
RUN(Float16);
case 4:
RUN(Int32);
}
megdnn_assert(0, "bad dtype size");
}
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
eaebdd3fa471448c849f985fd07d736775696eac.cu
|
#include "src/cuda/elemwise_helper.cuh"
#include "src/cuda/query_blocksize.cuh"
#include "src/cuda/relayout/kern_contiguous.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
// dst is contiguous
void copy_last_contiguous(
const TensorND& dst, const TensorND& src, size_t contiguous_size,
cudaStream_t stream) {
ElemwiseOpParamN<2> param;
param[0] = dst;
param[1] = src;
#define RUN(_dt) \
do { \
typedef DTypeTrait<dtype::_dt>::ctype ctype; \
param[0].layout.dtype = param[1].layout.dtype = dtype::_dt(); \
param.init_from_given_tensor(); \
param.assert_initialized(); \
contiguous_intl::UserOpInvoker<ctype, 2>(param, stream, contiguous_size); \
return; \
} while (0)
switch (dst.layout.dtype.size()) {
case 1:
RUN(Byte);
case 2:
RUN(Float16);
case 4:
RUN(Int32);
}
megdnn_assert(0, "bad dtype size");
}
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
467dca96642631cdd1284e12faba92608e6e608c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "./../main.h" // (in main directory) needed to recognized input parameters
#include "./../util/avi/avilib.h" // (in directory) needed by avi functions
#include "./../util/avi/avimod.h" // (in directory) needed by avi functions
// CUDA kernel
#include "kernel.h"
void
kernel_gpu_wrapper( params_common common,
int* endoRow,
int* endoCol,
int* tEndoRowLoc,
int* tEndoColLoc,
int* epiRow,
int* epiCol,
int* tEpiRowLoc,
int* tEpiColLoc,
avi_t* frames)
{
// common
//printf("tSize is %d, sSize is %d\n", common.tSize, common.sSize);
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
//==================================================50
// endo points templates
//==================================================50
fp* d_endoT;
hipMalloc((void**)&d_endoT, common.in_mem * common.endoPoints);
//printf("%d\n", common.in_elem * common.endoPoints);
//==================================================50
// epi points templates
//==================================================50
fp* d_epiT;
hipMalloc((void**)&d_epiT, common.in_mem * common.epiPoints);
//====================================================================================================100
// AREA AROUND POINT FROM FRAME (LOCAL)
//====================================================================================================100
// common
common.in2_rows = common.sSize + 1 + common.sSize;
common.in2_cols = common.in2_rows;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(fp) * common.in2_elem;
fp* d_in2;
hipMalloc((void**)&d_in2, common.in2_mem * common.allPoints);
//printf("%d\n", common.in2_elem * common.allPoints);
//====================================================================================================100
// CONVOLUTION (LOCAL)
//====================================================================================================100
// common
common.conv_rows = common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(fp) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
// unique
fp* d_conv;
hipMalloc((void**)&d_conv, common.conv_mem * common.allPoints);
//====================================================================================================100
// CUMULATIVE SUM (LOCAL)
//====================================================================================================100
//==================================================50
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//==================================================50
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(fp) * common.in2_pad_cumv_elem;
// unique
//buffer<fp,1> d_in2_pad_cumv(common.in2_pad_cumv_elem * common.allPoints);
//printf("%d\n", common.in2_pad_cumv_elem * common.allPoints);
fp* d_in2_pad_cumv;
hipMalloc((void**)&d_in2_pad_cumv, common.in2_pad_cumv_mem * common.allPoints);
//==================================================50
// SELECTION
//==================================================50
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(fp) * common.in2_pad_cumv_sel_elem;
// unique
//buffer<fp,1> d_in2_pad_cumv_sel(common.in2_pad_cumv_sel_elem * common.allPoints);
//printf("%d\n", common.in2_pad_cumv_sel_elem * common.allPoints);
fp* d_in2_pad_cumv_sel;
hipMalloc((void**)&d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//==================================================50
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(fp) * common.in2_sub_cumh_elem;
// unique
//buffer<fp,1> d_in2_sub_cumh(common.in2_sub_cumh_elem * common.allPoints);
//printf("%d\n", common.in2_sub_cumh_elem * common.allPoints);
fp* d_in2_sub_cumh;
hipMalloc((void**)&d_in2_sub_cumh, common.in2_sub_cumh_mem * common.allPoints);
//==================================================50
// SELECTION
//==================================================50
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(fp) * common.in2_sub_cumh_sel_elem;
// unique
//buffer<fp,1> d_in2_sub_cumh_sel(common.in2_sub_cumh_sel_elem * common.allPoints);
//printf("%d\n", common.in2_sub_cumh_sel_elem * common.allPoints);
fp* d_in2_sub_cumh_sel;
hipMalloc((void**)&d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION
//==================================================50
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(fp) * common.in2_sub2_elem;
// unique
//buffer<fp,1> d_in2_sub2(common.in2_sub2_elem * common.allPoints);
//printf("%d\n", common.in2_sub2_elem * common.allPoints);
fp* d_in2_sub2;
hipMalloc((void**)&d_in2_sub2, common.in2_sub2_mem * common.allPoints);
//====================================================================================================100
// CUMULATIVE SUM 2 (LOCAL)
//====================================================================================================100
//==================================================50
// MULTIPLICATION
//==================================================50
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// unique
//buffer<fp,1> d_in2_sqr(common.in2_elem * common.allPoints);
//printf("%d\n", common.in2_elem * common.allPoints);
fp* d_in2_sqr;
hipMalloc((void**)&d_in2_sqr, common.in2_sqr_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION
//==================================================50
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// unique
//buffer<fp,1> d_in2_sqr_sub2(common.in2_sub2_elem * common.allPoints);
//printf("%d\n", common.in2_sub2_elem * common.allPoints);
fp* d_in2_sqr_sub2;
hipMalloc((void**)&d_in2_sqr_sub2, common.in2_sqr_sub2_mem * common.allPoints);
//====================================================================================================100
// FINAL (LOCAL)
//====================================================================================================100
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// unique
//buffer<fp,1> d_in_sqr(common.in_elem * common.allPoints);
//printf("%d\n", common.in_elem * common.allPoints);
fp* d_in_sqr;
hipMalloc((void**)&d_in_sqr, common.in_sqr_mem * common.allPoints);
//====================================================================================================100
// TEMPLATE MASK CREATE (LOCAL)
//====================================================================================================100
// common
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(fp) * common.tMask_elem;
// unique
//buffer<fp,1> d_tMask(common.tMask_elem * common.allPoints);
//printf("%d\n", common.tMask_elem * common.allPoints);
fp* d_tMask;
hipMalloc((void**)&d_tMask, common.tMask_mem * common.allPoints);
//====================================================================================================100
// POINT MASK INITIALIZE (LOCAL)
//====================================================================================================100
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(fp) * common.mask_elem;
//====================================================================================================100
// MASK CONVOLUTION (LOCAL)
//====================================================================================================100
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(fp) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
//printf("common.endPoints=%d\n", common.endoPoints); // 20
//printf("common.epiPoints=%d\n", common.epiPoints); // 31
//printf("common.in_elem=%d\n", common.in_elem);
//printf("common.endo_mem=%d\n", common.endo_mem); // 80
//printf("common.epi_mem=%d\n", common.epi_mem); // 124
//
//buffer<params_common,1> d_common(&common, 1, props); // range is 1 ?
//buffer<int,1> d_endoRow(endoRow, common.endoPoints, props);
//d_endoRow.set_final_data(nullptr);
//buffer<int,1> d_endoCol(endoCol, common.endoPoints, props);
//d_endoCol.set_final_data(nullptr);
//buffer<int,1> d_tEndoRowLoc(tEndoRowLoc, common.endoPoints * common.no_frames, props);
//buffer<int,1> d_tEndoColLoc(tEndoColLoc, common.endoPoints * common.no_frames, props);
//buffer<int,1> d_epiRow(epiRow, common.epiPoints, props);
//d_epiRow.set_final_data(nullptr);
//buffer<int,1> d_epiCol(epiCol, common.epiPoints, props);
//d_epiCol.set_final_data(nullptr);
//buffer<int,1> d_tEpiRowLoc(tEpiRowLoc, common.epiPoints * common.no_frames, props);
//buffer<int,1> d_tEpiColLoc(tEpiColLoc, common.epiPoints * common.no_frames, props);
int* d_endoRow;
hipMalloc((void**)&d_endoRow, common.endo_mem);
hipMemcpy(d_endoRow, endoRow, common.endo_mem, hipMemcpyHostToDevice);
int* d_endoCol;
hipMalloc((void**)&d_endoCol, common.endo_mem);
hipMemcpy(d_endoCol, endoCol, common.endo_mem, hipMemcpyHostToDevice);
int* d_tEndoRowLoc;
int* d_tEndoColLoc;
hipMalloc((void**)&d_tEndoRowLoc, common.endo_mem*common.no_frames);
hipMemcpy(d_tEndoRowLoc, tEndoRowLoc, common.endo_mem*common.no_frames, hipMemcpyHostToDevice);
hipMalloc((void**)&d_tEndoColLoc, common.endo_mem*common.no_frames);
hipMemcpy(d_tEndoColLoc, tEndoColLoc, common.endo_mem*common.no_frames, hipMemcpyHostToDevice);
int* d_epiRow;
int* d_epiCol;
hipMalloc((void**)&d_epiRow, common.epi_mem);
hipMemcpy(d_epiRow, epiRow, common.epi_mem, hipMemcpyHostToDevice);
hipMalloc((void**)&d_epiCol, common.epi_mem);
hipMemcpy(d_epiCol, epiCol, common.epi_mem, hipMemcpyHostToDevice);
int* d_tEpiRowLoc;
int* d_tEpiColLoc;
hipMalloc((void**)&d_tEpiRowLoc, common.epi_mem*common.no_frames);
hipMemcpy(d_tEpiRowLoc, tEpiRowLoc, common.epi_mem*common.no_frames, hipMemcpyHostToDevice);
hipMalloc((void**)&d_tEpiColLoc, common.epi_mem*common.no_frames);
hipMemcpy(d_tEpiColLoc, tEpiColLoc, common.epi_mem*common.no_frames, hipMemcpyHostToDevice);
//buffer<fp,1> d_mask_conv(common.mask_conv_elem * common.allPoints);
//d_mask_conv.set_final_data(nullptr);
fp* d_mask_conv;
hipMalloc((void**)&d_mask_conv, common.mask_conv_mem * common.allPoints);
//printf("%d\n", common.mask_conv_elem * common.allPoints);
//buffer<fp,1> d_in_mod_temp(common.in_elem * common.allPoints);
//d_in_mod_temp.set_final_data(nullptr);
fp* d_in_mod_temp;
hipMalloc((void**)&d_in_mod_temp, common.in_mem * common.allPoints);
//printf("%d\n", common.in_elem * common.allPoints);
//buffer<fp,1> d_in_partial_sum(common.in_cols * common.allPoints);
//d_in_partial_sum.set_final_data(nullptr);
fp* d_in_partial_sum;
hipMalloc((void**)&d_in_partial_sum, sizeof(fp)*common.in_cols * common.allPoints);
//printf("%d\n", common.in_cols * common.allPoints);
//buffer<fp,1> d_in_sqr_partial_sum(common.in_sqr_rows * common.allPoints);
//d_in_sqr_partial_sum.set_final_data(nullptr);
fp* d_in_sqr_partial_sum;
hipMalloc((void**)&d_in_sqr_partial_sum, sizeof(fp)*common.in_sqr_rows * common.allPoints);
//printf("%d\n", common.in_sqr_rows * common.allPoints);
//buffer<fp,1> d_par_max_val(common.mask_conv_rows * common.allPoints);
//d_par_max_val.set_final_data(nullptr);
fp* d_par_max_val;
hipMalloc((void**)&d_par_max_val, sizeof(fp)*common.mask_conv_rows * common.allPoints);
//printf("%d\n", common.mask_conv_rows * common.allPoints);
//buffer<int,1> d_par_max_coo( common.mask_conv_rows * common.allPoints);
//d_par_max_coo.set_final_data(nullptr);
fp* d_par_max_coo;
hipMalloc((void**)&d_par_max_coo, sizeof(fp)*common.mask_conv_rows * common.allPoints);
//buffer<fp,1> d_in_final_sum(common.allPoints);
//d_in_final_sum.set_final_data(nullptr);
fp* d_in_final_sum;
hipMalloc((void**)&d_in_final_sum, sizeof(fp)*common.allPoints);
//buffer<fp,1> d_in_sqr_final_sum(common.allPoints);
//d_in_sqr_final_sum.set_final_data(nullptr);
fp* d_in_sqr_final_sum;
hipMalloc((void**)&d_in_sqr_final_sum, sizeof(fp)*common.allPoints);
//buffer<fp,1> d_denomT(common.allPoints);
//d_denomT.set_final_data(nullptr);
fp* d_denomT;
hipMalloc((void**)&d_denomT, sizeof(fp)*common.allPoints);
#ifdef TEST_CHECKSUM
//buffer<fp,1> d_checksum(CHECK);
//d_checksum.set_final_data(nullptr);
//printf("%d\n", CHECK);
fp* checksum = (fp*) malloc (sizeof(fp)*CHECK);
fp* d_checksum;
hipMalloc((void**)&d_checksum, sizeof(fp)*CHECK);
#endif
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
// All operations within the kernel use the same maximum number of threads. The block size is chosen to fit the largest operation (on the padded matrix); the other operations use subsets of it.
dim3 threads(NUMBER_THREADS);
dim3 grids(common.allPoints);
printf("frame progress: ");
fflush(NULL);
//====================================================================================================100
// LAUNCH
//====================================================================================================100
// variables
fp* frame;
int frame_no;
//buffer<fp,1> d_frame(common.frame_elem);
fp* d_frame;
hipMalloc((void**)&d_frame, sizeof(fp)*common.frame_elem);
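// Process the requested number of frames: copy each frame to the GPU and launch the kernel with one block per tracked point.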
for(frame_no=0; frame_no<common.frames_processed; frame_no++) {
//==================================================50
// get and write current frame to GPU buffer
//==================================================50
// Extract the current frame from the video file (not cropped, not scaled, converted)
frame = get_frame( frames, // pointer to video file
frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
hipMemcpy(d_frame, frame, sizeof(fp)*common.frame_elem, hipMemcpyHostToDevice);
//==================================================50
// launch kernel
//==================================================50
hipLaunchKernelGGL(hw, dim3(grids), dim3(threads), 0, 0,
frame_no,
common,
d_frame,
d_endoRow,
d_endoCol,
d_tEndoRowLoc,
d_tEndoColLoc,
d_epiRow,
d_epiCol,
d_tEpiRowLoc,
d_tEpiColLoc,
d_endoT,
d_epiT,
d_in2,
d_conv,
d_in2_pad_cumv,
d_in2_pad_cumv_sel,
d_in2_sub_cumh,
d_in2_sub_cumh_sel,
d_in2_sub2,
d_in2_sqr,
d_in2_sqr_sub2,
d_in_sqr,
d_tMask,
d_mask_conv,
d_in_mod_temp,
d_in_partial_sum,
d_in_sqr_partial_sum,
d_par_max_val,
d_par_max_coo,
d_in_final_sum,
d_in_sqr_final_sum,
d_denomT
#ifdef TEST_CHECKSUM
,d_checksum
#endif
);
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
//==================================================50
// print frame progress
//==================================================50
printf("%d ", frame_no);
fflush(NULL);
//==================================================50
// DISPLAY CHECKSUM (TESTING)
//==================================================50
#ifdef TEST_CHECKSUM
hipMemcpy(checksum, d_checksum, sizeof(fp)*CHECK, hipMemcpyDeviceToHost);
printf("CHECKSUM:\n");
for(int i=0; i<CHECK; i++){
printf("i=%d checksum=%f\n", i, checksum[i]);
}
printf("\n\n");
#endif
}
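// Copy the per-frame tracked row/column locations of the endo and epi points back to the host.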
hipMemcpy(tEndoRowLoc, d_tEndoRowLoc, common.endo_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(tEndoColLoc, d_tEndoColLoc, common.endo_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(tEpiRowLoc, d_tEpiRowLoc, common.epi_mem * common.no_frames, hipMemcpyDeviceToHost);
hipMemcpy(tEpiColLoc, d_tEpiColLoc, common.epi_mem * common.no_frames, hipMemcpyDeviceToHost);
//====================================================================================================100
// PRINT FRAME PROGRESS END
//====================================================================================================100
#ifdef TEST_CHECKSUM
free(checksum);
hipFree(d_checksum);
#endif
hipFree(d_endoT);
hipFree(d_epiT);
hipFree(d_in2);
hipFree(d_conv);
hipFree(d_in2_pad_cumv);
hipFree(d_in2_pad_cumv_sel);
hipFree(d_in2_sub_cumh);
hipFree(d_in2_sub_cumh_sel);
hipFree(d_in2_sub2);
hipFree(d_in2_sqr);
hipFree(d_in2_sqr_sub2);
hipFree(d_in_sqr);
hipFree(d_tMask);
hipFree(d_endoRow);
hipFree(d_endoCol);
hipFree(d_tEndoRowLoc);
hipFree(d_tEndoColLoc);
hipFree(d_epiRow);
hipFree(d_epiCol);
hipFree(d_tEpiRowLoc);
hipFree(d_tEpiColLoc);
hipFree(d_mask_conv);
hipFree(d_in_mod_temp);
hipFree(d_in_partial_sum);
hipFree(d_in_sqr_partial_sum);
hipFree(d_par_max_val);
hipFree(d_par_max_coo);
hipFree(d_in_final_sum);
hipFree(d_in_sqr_final_sum);
hipFree(d_denomT);
hipFree(d_frame);
printf("\n");
fflush(NULL);
}
|
467dca96642631cdd1284e12faba92608e6e608c.cu
|
#include <cuda.h>
#include "./../main.h" // (in main directory) needed to recognized input parameters
#include "./../util/avi/avilib.h" // (in directory) needed by avi functions
#include "./../util/avi/avimod.h" // (in directory) needed by avi functions
// CUDA kernel
#include "kernel.h"
void
kernel_gpu_wrapper( params_common common,
int* endoRow,
int* endoCol,
int* tEndoRowLoc,
int* tEndoColLoc,
int* epiRow,
int* epiCol,
int* tEpiRowLoc,
int* tEpiColLoc,
avi_t* frames)
{
// common
//printf("tSize is %d, sSize is %d\n", common.tSize, common.sSize);
common.in_rows = common.tSize + 1 + common.tSize;
common.in_cols = common.in_rows;
common.in_elem = common.in_rows * common.in_cols;
common.in_mem = sizeof(fp) * common.in_elem;
//==================================================50
// endo points templates
//==================================================50
fp* d_endoT;
cudaMalloc((void**)&d_endoT, common.in_mem * common.endoPoints);
//printf("%d\n", common.in_elem * common.endoPoints);
//==================================================50
// epi points templates
//==================================================50
fp* d_epiT;
cudaMalloc((void**)&d_epiT, common.in_mem * common.epiPoints);
//====================================================================================================100
// AREA AROUND POINT FROM FRAME (LOCAL)
//====================================================================================================100
// common
common.in2_rows = common.sSize + 1 + common.sSize;
common.in2_cols = common.in2_rows;
common.in2_elem = common.in2_rows * common.in2_cols;
common.in2_mem = sizeof(fp) * common.in2_elem;
fp* d_in2;
cudaMalloc((void**)&d_in2, common.in2_mem * common.allPoints);
//printf("%d\n", common.in2_elem * common.allPoints);
//====================================================================================================100
// CONVOLUTION (LOCAL)
//====================================================================================================100
// common
common.conv_rows = common.in_rows + common.in2_rows - 1; // number of rows in I
common.conv_cols = common.in_cols + common.in2_cols - 1; // number of columns in I
common.conv_elem = common.conv_rows * common.conv_cols; // number of elements
common.conv_mem = sizeof(fp) * common.conv_elem;
common.ioffset = 0;
common.joffset = 0;
// unique
fp* d_conv;
cudaMalloc((void**)&d_conv, common.conv_mem * common.allPoints);
//====================================================================================================100
// CUMULATIVE SUM (LOCAL)
//====================================================================================================100
//==================================================50
// PADDING OF ARRAY, VERTICAL CUMULATIVE SUM
//==================================================50
// common
common.in2_pad_add_rows = common.in_rows;
common.in2_pad_add_cols = common.in_cols;
common.in2_pad_cumv_rows = common.in2_rows + 2*common.in2_pad_add_rows;
common.in2_pad_cumv_cols = common.in2_cols + 2*common.in2_pad_add_cols;
common.in2_pad_cumv_elem = common.in2_pad_cumv_rows * common.in2_pad_cumv_cols;
common.in2_pad_cumv_mem = sizeof(fp) * common.in2_pad_cumv_elem;
// unique
//buffer<fp,1> d_in2_pad_cumv(common.in2_pad_cumv_elem * common.allPoints);
//printf("%d\n", common.in2_pad_cumv_elem * common.allPoints);
fp* d_in2_pad_cumv;
cudaMalloc((void**)&d_in2_pad_cumv, common.in2_pad_cumv_mem * common.allPoints);
//==================================================50
// SELECTION
//==================================================50
// common
common.in2_pad_cumv_sel_rowlow = 1 + common.in_rows; // (1 to n+1)
common.in2_pad_cumv_sel_rowhig = common.in2_pad_cumv_rows - 1;
common.in2_pad_cumv_sel_collow = 1;
common.in2_pad_cumv_sel_colhig = common.in2_pad_cumv_cols;
common.in2_pad_cumv_sel_rows = common.in2_pad_cumv_sel_rowhig - common.in2_pad_cumv_sel_rowlow + 1;
common.in2_pad_cumv_sel_cols = common.in2_pad_cumv_sel_colhig - common.in2_pad_cumv_sel_collow + 1;
common.in2_pad_cumv_sel_elem = common.in2_pad_cumv_sel_rows * common.in2_pad_cumv_sel_cols;
common.in2_pad_cumv_sel_mem = sizeof(fp) * common.in2_pad_cumv_sel_elem;
// unique
//buffer<fp,1> d_in2_pad_cumv_sel(common.in2_pad_cumv_sel_elem * common.allPoints);
//printf("%d\n", common.in2_pad_cumv_sel_elem * common.allPoints);
fp* d_in2_pad_cumv_sel;
cudaMalloc((void**)&d_in2_pad_cumv_sel, common.in2_pad_cumv_sel_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION, HORIZONTAL CUMULATIVE SUM
//==================================================50
// common
common.in2_pad_cumv_sel2_rowlow = 1;
common.in2_pad_cumv_sel2_rowhig = common.in2_pad_cumv_rows - common.in_rows - 1;
common.in2_pad_cumv_sel2_collow = 1;
common.in2_pad_cumv_sel2_colhig = common.in2_pad_cumv_cols;
common.in2_sub_cumh_rows = common.in2_pad_cumv_sel2_rowhig - common.in2_pad_cumv_sel2_rowlow + 1;
common.in2_sub_cumh_cols = common.in2_pad_cumv_sel2_colhig - common.in2_pad_cumv_sel2_collow + 1;
common.in2_sub_cumh_elem = common.in2_sub_cumh_rows * common.in2_sub_cumh_cols;
common.in2_sub_cumh_mem = sizeof(fp) * common.in2_sub_cumh_elem;
// unique
//buffer<fp,1> d_in2_sub_cumh(common.in2_sub_cumh_elem * common.allPoints);
//printf("%d\n", common.in2_sub_cumh_elem * common.allPoints);
fp* d_in2_sub_cumh;
cudaMalloc((void**)&d_in2_sub_cumh, common.in2_sub_cumh_mem * common.allPoints);
//==================================================50
// SELECTION
//==================================================50
// common
common.in2_sub_cumh_sel_rowlow = 1;
common.in2_sub_cumh_sel_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel_collow = 1 + common.in_cols;
common.in2_sub_cumh_sel_colhig = common.in2_sub_cumh_cols - 1;
common.in2_sub_cumh_sel_rows = common.in2_sub_cumh_sel_rowhig - common.in2_sub_cumh_sel_rowlow + 1;
common.in2_sub_cumh_sel_cols = common.in2_sub_cumh_sel_colhig - common.in2_sub_cumh_sel_collow + 1;
common.in2_sub_cumh_sel_elem = common.in2_sub_cumh_sel_rows * common.in2_sub_cumh_sel_cols;
common.in2_sub_cumh_sel_mem = sizeof(fp) * common.in2_sub_cumh_sel_elem;
// unique
//buffer<fp,1> d_in2_sub_cumh_sel(common.in2_sub_cumh_sel_elem * common.allPoints);
//printf("%d\n", common.in2_sub_cumh_sel_elem * common.allPoints);
fp* d_in2_sub_cumh_sel;
cudaMalloc((void**)&d_in2_sub_cumh_sel, common.in2_sub_cumh_sel_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION
//==================================================50
// common
common.in2_sub_cumh_sel2_rowlow = 1;
common.in2_sub_cumh_sel2_rowhig = common.in2_sub_cumh_rows;
common.in2_sub_cumh_sel2_collow = 1;
common.in2_sub_cumh_sel2_colhig = common.in2_sub_cumh_cols - common.in_cols - 1;
common.in2_sub2_rows = common.in2_sub_cumh_sel2_rowhig - common.in2_sub_cumh_sel2_rowlow + 1;
common.in2_sub2_cols = common.in2_sub_cumh_sel2_colhig - common.in2_sub_cumh_sel2_collow + 1;
common.in2_sub2_elem = common.in2_sub2_rows * common.in2_sub2_cols;
common.in2_sub2_mem = sizeof(fp) * common.in2_sub2_elem;
// unique
//buffer<fp,1> d_in2_sub2(common.in2_sub2_elem * common.allPoints);
//printf("%d\n", common.in2_sub2_elem * common.allPoints);
fp* d_in2_sub2;
cudaMalloc((void**)&d_in2_sub2, common.in2_sub2_mem * common.allPoints);
//====================================================================================================100
// CUMULATIVE SUM 2 (LOCAL)
//====================================================================================================100
//==================================================50
// MULTIPLICATION
//==================================================50
// common
common.in2_sqr_rows = common.in2_rows;
common.in2_sqr_cols = common.in2_cols;
common.in2_sqr_elem = common.in2_elem;
common.in2_sqr_mem = common.in2_mem;
// unique
//buffer<fp,1> d_in2_sqr(common.in2_elem * common.allPoints);
//printf("%d\n", common.in2_elem * common.allPoints);
fp* d_in2_sqr;
cudaMalloc((void**)&d_in2_sqr, common.in2_sqr_mem * common.allPoints);
//==================================================50
// SELECTION 2, SUBTRACTION
//==================================================50
// common
common.in2_sqr_sub2_rows = common.in2_sub2_rows;
common.in2_sqr_sub2_cols = common.in2_sub2_cols;
common.in2_sqr_sub2_elem = common.in2_sub2_elem;
common.in2_sqr_sub2_mem = common.in2_sub2_mem;
// unique
//buffer<fp,1> d_in2_sqr_sub2(common.in2_sub2_elem * common.allPoints);
//printf("%d\n", common.in2_sub2_elem * common.allPoints);
fp* d_in2_sqr_sub2;
cudaMalloc((void**)&d_in2_sqr_sub2, common.in2_sqr_sub2_mem * common.allPoints);
//====================================================================================================100
// FINAL (LOCAL)
//====================================================================================================100
// common
common.in_sqr_rows = common.in_rows;
common.in_sqr_cols = common.in_cols;
common.in_sqr_elem = common.in_elem;
common.in_sqr_mem = common.in_mem;
// unique
//buffer<fp,1> d_in_sqr(common.in_elem * common.allPoints);
//printf("%d\n", common.in_elem * common.allPoints);
fp* d_in_sqr;
cudaMalloc((void**)&d_in_sqr, common.in_sqr_mem * common.allPoints);
//====================================================================================================100
// TEMPLATE MASK CREATE (LOCAL)
//====================================================================================================100
// common
common.tMask_rows = common.in_rows + (common.sSize+1+common.sSize) - 1;
common.tMask_cols = common.tMask_rows;
common.tMask_elem = common.tMask_rows * common.tMask_cols;
common.tMask_mem = sizeof(fp) * common.tMask_elem;
// unique
//buffer<fp,1> d_tMask(common.tMask_elem * common.allPoints);
//printf("%d\n", common.tMask_elem * common.allPoints);
fp* d_tMask;
cudaMalloc((void**)&d_tMask, common.tMask_mem * common.allPoints);
//====================================================================================================100
// POINT MASK INITIALIZE (LOCAL)
//====================================================================================================100
// common
common.mask_rows = common.maxMove;
common.mask_cols = common.mask_rows;
common.mask_elem = common.mask_rows * common.mask_cols;
common.mask_mem = sizeof(fp) * common.mask_elem;
//====================================================================================================100
// MASK CONVOLUTION (LOCAL)
//====================================================================================================100
// common
common.mask_conv_rows = common.tMask_rows; // number of rows in I
common.mask_conv_cols = common.tMask_cols; // number of columns in I
common.mask_conv_elem = common.mask_conv_rows * common.mask_conv_cols; // number of elements
common.mask_conv_mem = sizeof(fp) * common.mask_conv_elem;
common.mask_conv_ioffset = (common.mask_rows-1)/2;
if((common.mask_rows-1) % 2 > 0.5){
common.mask_conv_ioffset = common.mask_conv_ioffset + 1;
}
common.mask_conv_joffset = (common.mask_cols-1)/2;
if((common.mask_cols-1) % 2 > 0.5){
common.mask_conv_joffset = common.mask_conv_joffset + 1;
}
//printf("common.endPoints=%d\n", common.endoPoints); // 20
//printf("common.epiPoints=%d\n", common.epiPoints); // 31
//printf("common.in_elem=%d\n", common.in_elem);
//printf("common.endo_mem=%d\n", common.endo_mem); // 80
//printf("common.epi_mem=%d\n", common.epi_mem); // 124
//
//buffer<params_common,1> d_common(&common, 1, props); // range is 1 ?
//buffer<int,1> d_endoRow(endoRow, common.endoPoints, props);
//d_endoRow.set_final_data(nullptr);
//buffer<int,1> d_endoCol(endoCol, common.endoPoints, props);
//d_endoCol.set_final_data(nullptr);
//buffer<int,1> d_tEndoRowLoc(tEndoRowLoc, common.endoPoints * common.no_frames, props);
//buffer<int,1> d_tEndoColLoc(tEndoColLoc, common.endoPoints * common.no_frames, props);
//buffer<int,1> d_epiRow(epiRow, common.epiPoints, props);
//d_epiRow.set_final_data(nullptr);
//buffer<int,1> d_epiCol(epiCol, common.epiPoints, props);
//d_epiCol.set_final_data(nullptr);
//buffer<int,1> d_tEpiRowLoc(tEpiRowLoc, common.epiPoints * common.no_frames, props);
//buffer<int,1> d_tEpiColLoc(tEpiColLoc, common.epiPoints * common.no_frames, props);
int* d_endoRow;
cudaMalloc((void**)&d_endoRow, common.endo_mem);
cudaMemcpy(d_endoRow, endoRow, common.endo_mem, cudaMemcpyHostToDevice);
int* d_endoCol;
cudaMalloc((void**)&d_endoCol, common.endo_mem);
cudaMemcpy(d_endoCol, endoCol, common.endo_mem, cudaMemcpyHostToDevice);
int* d_tEndoRowLoc;
int* d_tEndoColLoc;
cudaMalloc((void**)&d_tEndoRowLoc, common.endo_mem*common.no_frames);
cudaMemcpy(d_tEndoRowLoc, tEndoRowLoc, common.endo_mem*common.no_frames, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_tEndoColLoc, common.endo_mem*common.no_frames);
cudaMemcpy(d_tEndoColLoc, tEndoColLoc, common.endo_mem*common.no_frames, cudaMemcpyHostToDevice);
int* d_epiRow;
int* d_epiCol;
cudaMalloc((void**)&d_epiRow, common.epi_mem);
cudaMemcpy(d_epiRow, epiRow, common.epi_mem, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_epiCol, common.epi_mem);
cudaMemcpy(d_epiCol, epiCol, common.epi_mem, cudaMemcpyHostToDevice);
int* d_tEpiRowLoc;
int* d_tEpiColLoc;
cudaMalloc((void**)&d_tEpiRowLoc, common.epi_mem*common.no_frames);
cudaMemcpy(d_tEpiRowLoc, tEpiRowLoc, common.epi_mem*common.no_frames, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_tEpiColLoc, common.epi_mem*common.no_frames);
cudaMemcpy(d_tEpiColLoc, tEpiColLoc, common.epi_mem*common.no_frames, cudaMemcpyHostToDevice);
//buffer<fp,1> d_mask_conv(common.mask_conv_elem * common.allPoints);
//d_mask_conv.set_final_data(nullptr);
fp* d_mask_conv;
cudaMalloc((void**)&d_mask_conv, common.mask_conv_mem * common.allPoints);
//printf("%d\n", common.mask_conv_elem * common.allPoints);
//buffer<fp,1> d_in_mod_temp(common.in_elem * common.allPoints);
//d_in_mod_temp.set_final_data(nullptr);
fp* d_in_mod_temp;
cudaMalloc((void**)&d_in_mod_temp, common.in_mem * common.allPoints);
//printf("%d\n", common.in_elem * common.allPoints);
//buffer<fp,1> d_in_partial_sum(common.in_cols * common.allPoints);
//d_in_partial_sum.set_final_data(nullptr);
fp* d_in_partial_sum;
cudaMalloc((void**)&d_in_partial_sum, sizeof(fp)*common.in_cols * common.allPoints);
//printf("%d\n", common.in_cols * common.allPoints);
//buffer<fp,1> d_in_sqr_partial_sum(common.in_sqr_rows * common.allPoints);
//d_in_sqr_partial_sum.set_final_data(nullptr);
fp* d_in_sqr_partial_sum;
cudaMalloc((void**)&d_in_sqr_partial_sum, sizeof(fp)*common.in_sqr_rows * common.allPoints);
//printf("%d\n", common.in_sqr_rows * common.allPoints);
//buffer<fp,1> d_par_max_val(common.mask_conv_rows * common.allPoints);
//d_par_max_val.set_final_data(nullptr);
fp* d_par_max_val;
cudaMalloc((void**)&d_par_max_val, sizeof(fp)*common.mask_conv_rows * common.allPoints);
//printf("%d\n", common.mask_conv_rows * common.allPoints);
//buffer<int,1> d_par_max_coo( common.mask_conv_rows * common.allPoints);
//d_par_max_coo.set_final_data(nullptr);
fp* d_par_max_coo;
cudaMalloc((void**)&d_par_max_coo, sizeof(fp)*common.mask_conv_rows * common.allPoints);
//buffer<fp,1> d_in_final_sum(common.allPoints);
//d_in_final_sum.set_final_data(nullptr);
fp* d_in_final_sum;
cudaMalloc((void**)&d_in_final_sum, sizeof(fp)*common.allPoints);
//buffer<fp,1> d_in_sqr_final_sum(common.allPoints);
//d_in_sqr_final_sum.set_final_data(nullptr);
fp* d_in_sqr_final_sum;
cudaMalloc((void**)&d_in_sqr_final_sum, sizeof(fp)*common.allPoints);
//buffer<fp,1> d_denomT(common.allPoints);
//d_denomT.set_final_data(nullptr);
fp* d_denomT;
cudaMalloc((void**)&d_denomT, sizeof(fp)*common.allPoints);
#ifdef TEST_CHECKSUM
//buffer<fp,1> d_checksum(CHECK);
//d_checksum.set_final_data(nullptr);
//printf("%d\n", CHECK);
fp* checksum = (fp*) malloc (sizeof(fp)*CHECK);
fp* d_checksum;
cudaMalloc((void**)&d_checksum, sizeof(fp)*CHECK);
#endif
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
// All operations within the kernel use the same maximum number of threads. The block size is set to the size appropriate for the largest operation (on the padded matrix); other operations use subsets of it.
dim3 threads(NUMBER_THREADS);
dim3 grids(common.allPoints);
printf("frame progress: ");
fflush(NULL);
//====================================================================================================100
// LAUNCH
//====================================================================================================100
// variables
fp* frame;
int frame_no;
//buffer<fp,1> d_frame(common.frame_elem);
fp* d_frame;
cudaMalloc((void**)&d_frame, sizeof(fp)*common.frame_elem);
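// Process the requested number of frames: copy each frame to the GPU and launch the kernel with one block per tracked point.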
for(frame_no=0; frame_no<common.frames_processed; frame_no++) {
//==================================================50
// get and write current frame to GPU buffer
//==================================================50
// Extract the current frame from the video file (not cropped, not scaled, converted)
frame = get_frame( frames, // pointer to video file
frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
// copy frame to GPU memory
cudaMemcpy(d_frame, frame, sizeof(fp)*common.frame_elem, cudaMemcpyHostToDevice);
//==================================================50
// launch kernel
//==================================================50
hw<<<grids, threads>>>(
frame_no,
common,
d_frame,
d_endoRow,
d_endoCol,
d_tEndoRowLoc,
d_tEndoColLoc,
d_epiRow,
d_epiCol,
d_tEpiRowLoc,
d_tEpiColLoc,
d_endoT,
d_epiT,
d_in2,
d_conv,
d_in2_pad_cumv,
d_in2_pad_cumv_sel,
d_in2_sub_cumh,
d_in2_sub_cumh_sel,
d_in2_sub2,
d_in2_sqr,
d_in2_sqr_sub2,
d_in_sqr,
d_tMask,
d_mask_conv,
d_in_mod_temp,
d_in_partial_sum,
d_in_sqr_partial_sum,
d_par_max_val,
d_par_max_coo,
d_in_final_sum,
d_in_sqr_final_sum,
d_denomT
#ifdef TEST_CHECKSUM
,d_checksum
#endif
);
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(frame);
//==================================================50
// print frame progress
//==================================================50
printf("%d ", frame_no);
fflush(NULL);
//==================================================50
// DISPLAY CHECKSUM (TESTING)
//==================================================50
#ifdef TEST_CHECKSUM
cudaMemcpy(checksum, d_checksum, sizeof(fp)*CHECK, cudaMemcpyDeviceToHost);
printf("CHECKSUM:\n");
for(int i=0; i<CHECK; i++){
printf("i=%d checksum=%f\n", i, checksum[i]);
}
printf("\n\n");
#endif
}
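// Copy the per-frame tracked row/column locations of the endo and epi points back to the host.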
cudaMemcpy(tEndoRowLoc, d_tEndoRowLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(tEndoColLoc, d_tEndoColLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(tEpiRowLoc, d_tEpiRowLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
cudaMemcpy(tEpiColLoc, d_tEpiColLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost);
//====================================================================================================100
// PRINT FRAME PROGRESS END
//====================================================================================================100
#ifdef TEST_CHECKSUM
free(checksum);
cudaFree(d_checksum);
#endif
cudaFree(d_endoT);
cudaFree(d_epiT);
cudaFree(d_in2);
cudaFree(d_conv);
cudaFree(d_in2_pad_cumv);
cudaFree(d_in2_pad_cumv_sel);
cudaFree(d_in2_sub_cumh);
cudaFree(d_in2_sub_cumh_sel);
cudaFree(d_in2_sub2);
cudaFree(d_in2_sqr);
cudaFree(d_in2_sqr_sub2);
cudaFree(d_in_sqr);
cudaFree(d_tMask);
cudaFree(d_endoRow);
cudaFree(d_endoCol);
cudaFree(d_tEndoRowLoc);
cudaFree(d_tEndoColLoc);
cudaFree(d_epiRow);
cudaFree(d_epiCol);
cudaFree(d_tEpiRowLoc);
cudaFree(d_tEpiColLoc);
cudaFree(d_mask_conv);
cudaFree(d_in_mod_temp);
cudaFree(d_in_partial_sum);
cudaFree(d_in_sqr_partial_sum);
cudaFree(d_par_max_val);
cudaFree(d_par_max_coo);
cudaFree(d_in_final_sum);
cudaFree(d_in_sqr_final_sum);
cudaFree(d_denomT);
cudaFree(d_frame);
printf("\n");
fflush(NULL);
}
|
89cce0d276310e63d7fe7fab4477d81e0ac5f332.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template<typename Dtype>
void ExpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
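// Forward pass: top = outer_scale_ * exp(inner_scale_ * bottom), computed element-wise on the CUDA or GreenTea backend.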
const int count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (inner_scale_ == Dtype(1)) {
caffe_gpu_exp(count, bottom_data, top_data);
} else {
caffe_gpu_scale(count, inner_scale_, bottom_data, top_data);
caffe_gpu_exp(count, top_data, top_data);
}
if (outer_scale_ != Dtype(1)) {
caffe_gpu_scal(count, outer_scale_, top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
if (inner_scale_ == Dtype(1)) {
greentea_gpu_exp<Dtype>(this->device_->id(), count,
(cl_mem) bottom_data, 0, (cl_mem) top_data, 0);
} else {
greentea_gpu_scale<Dtype>(this->device_->id(),
count, inner_scale_,
(cl_mem) bottom_data, 0, (cl_mem) top_data, 0);
greentea_gpu_exp<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) top_data, 0);
}
if (outer_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(),
count, outer_scale_,
(cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void ExpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
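// Backward pass: bottom_diff = top_diff * top * inner_scale_, since d/dx (c * exp(a * x)) = a * c * exp(a * x).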
if (!propagate_down[0]) {
return;
}
const int count = bottom[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_gpu_mul(count, top_data, top_diff, bottom_diff);
if (inner_scale_ != Dtype(1)) {
caffe_gpu_scal(count, inner_scale_, bottom_diff);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0);
if (inner_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, inner_scale_,
(cl_mem) bottom_diff, 0);
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer);
} // namespace caffe
|
89cce0d276310e63d7fe7fab4477d81e0ac5f332.cu
|
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template<typename Dtype>
void ExpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
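// Forward pass: top = outer_scale_ * exp(inner_scale_ * bottom), computed element-wise on the CUDA or GreenTea backend.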
const int count = bottom[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (inner_scale_ == Dtype(1)) {
caffe_gpu_exp(count, bottom_data, top_data);
} else {
caffe_gpu_scale(count, inner_scale_, bottom_data, top_data);
caffe_gpu_exp(count, top_data, top_data);
}
if (outer_scale_ != Dtype(1)) {
caffe_gpu_scal(count, outer_scale_, top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
if (inner_scale_ == Dtype(1)) {
greentea_gpu_exp<Dtype>(this->device_->id(), count,
(cl_mem) bottom_data, 0, (cl_mem) top_data, 0);
} else {
greentea_gpu_scale<Dtype>(this->device_->id(),
count, inner_scale_,
(cl_mem) bottom_data, 0, (cl_mem) top_data, 0);
greentea_gpu_exp<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) top_data, 0);
}
if (outer_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(),
count, outer_scale_,
(cl_mem) top_data, 0);
}
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void ExpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
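// Backward pass: bottom_diff = top_diff * top * inner_scale_, since d/dx (c * exp(a * x)) = a * c * exp(a * x).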
if (!propagate_down[0]) {
return;
}
const int count = bottom[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_mul(count, top_data, top_diff, bottom_diff);
if (inner_scale_ != Dtype(1)) {
caffe_gpu_scal(count, inner_scale_, bottom_diff);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) top_data, 0, (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0);
if (inner_scale_ != Dtype(1)) {
greentea_gpu_scal<Dtype>(this->device_->id(), count, inner_scale_,
(cl_mem) bottom_diff, 0);
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ExpLayer);
} // namespace caffe
|
0078e1754a3c7b05f87f0a154677cb92f5979cb8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaSSaturation_propagate_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
unsigned int size = 1;
float threshold = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
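// iXSIZE and iYSIZE are rounded up to multiples of the block dimensions so the grid covers the whole matrix.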
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(cudaSSaturation_propagate_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, x, y, size, threshold);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(cudaSSaturation_propagate_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, x, y, size, threshold);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(cudaSSaturation_propagate_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, x, y, size, threshold);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
0078e1754a3c7b05f87f0a154677cb92f5979cb8.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaSSaturation_propagate_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
unsigned int size = 1;
float threshold = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
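// iXSIZE and iYSIZE are rounded up to multiples of the block dimensions so the grid covers the whole matrix.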
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaSSaturation_propagate_kernel<<<gridBlock,threadBlock>>>(x,y,size,threshold);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaSSaturation_propagate_kernel<<<gridBlock,threadBlock>>>(x,y,size,threshold);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaSSaturation_propagate_kernel<<<gridBlock,threadBlock>>>(x,y,size,threshold);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
268f7c105f30119185b35deb72fa158f36125cf4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
* This file has the necessary functions to perform the X-ray CBCT projection
* operation given a geometry, angles and an image. It uses the 3D texture
* memory linear interpolation to uniformly sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "ray_interpolated_projection.hpp"
#include "mex.h"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
// Declare the texture reference.
texture<float, hipTextureType3D , hipReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin,
float maxdist){
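// Each thread integrates the volume along the ray from the source to its detector pixel, stepping through the 3D texture at the requested accuracy.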
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
float length=sqrt((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z));
//now length is the integer number of samples required on this line
length=ceil(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -source.x)/(length);
vectY=(P.y -source.y)/(length);
vectZ=(P.z -source.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((geo.DSO/min(geo.dVoxelX,geo.dVoxelY)+maxdist)/geo.accuracy < length)
length=ceil((geo.DSO/min(geo.dVoxelX,geo.dVoxelY)+maxdist)/geo.accuracy);
//Length is not actually a length, but the number of memory reads at the given accuracy ("samples per voxel")
for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+source.x;
ty=vectY*i+source.y;
tz=vectZ*i+source.z;
sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time.
}
float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
int interpolation_projection(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){
// copy data to CUDA memory
hipArray *d_imagedata = 0;
const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
if (geo.accuracy>1){
tex.filterMode = hipFilterModePoint;
geo.accuracy=1;
}
else
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
hipMalloc((void**)&dProjection, num_bytes);
hipMemset(dProjection,0,num_bytes);
cudaCheckErrors("hipMalloc fail");
// If we are going to time
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
int divU,divV;
divU=8;
divV=8;
dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,1);
Point3D source, deltaU, deltaV, uvOrigin;
float maxdist;
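// For each projection angle: precompute the per-angle geometry, launch the projection kernel, and copy the resulting projection back to the host.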
for (unsigned int i=0;i<nalpha;i++){
geo.alpha=alphas[i];
//precompute distances for faster execution
maxdist=maxDistanceCubeXY(geo,geo.alpha,i);
//Precompute per angle constant stuff for speed
computeDeltas(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Interpolation!!
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
hipLaunchKernelGGL(kernelPixelDetector, dim3(grid), dim3(block), 0, 0, geo, dProjection, source, deltaU, deltaV, uvOrigin, floor(maxdist));
cudaCheckErrors("Kernel fail");
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
}
// copy result to host
hipMemcpy(result[i], dProjection, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy fail");
}
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dProjection);
hipFreeArray(d_imagedata);
cudaCheckErrors("hipFree d_imagedata fail");
hipDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
* used to compute the locations of the X-rays. While it seems verbose and overly optimized,
* it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO;
S.y=0;
S.z=0;
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
// Now we have the real world (OXYZ) coordinates of the bottom corner and its two neighbours.
// The objective is to get a position of the detector in a coordinate system where:
// 1-units are voxel size (can be different in each direction)
// 2-The image has its first voxel at (0,0,0)
// 3-The image never rotates
// To do that, we need to compute the detector "deltas", or "by how much
// (in the new xyz) do the voxels change when an index is added". To do that,
// several geometric steps need to be applied.
//1.Roll, pitch, yaw
// The detector can have a small rotation.
// According to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// only the Z rotation has a big influence on image quality when the rotations are small.
// Still, all rotations are supported.
// To roll, pitch and yaw, the detector has to be centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
//Now let's translate the points to where they should be:
P.x=P.x-(geo.DSD-geo.DSO);
Pu0.x=Pu0.x-(geo.DSD-geo.DSO);
Pv0.x=Pv0.x-(geo.DSD-geo.DSO);
//2: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z;
Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z;
Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z;
Point3D S2;
S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha);
S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha);
S2.z=S.z;
//3: Offset image (instead of offseting image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S2.x =S2.x+geo.sVoxelX/2-geo.dVoxelX/2; S2.y =S2.y+geo.sVoxelY/2-geo.dVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
//5. apply COR. Wherever everything was, now it's offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S2.x+=CORx; S2.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S2;
}
float maxDistanceCubeXY(Geometry geo, float alpha,int i){
///////////
// Compute initial "t" so we safely access as little out of bounds as possible.
//////////
float maxCubX,maxCubY;
// Forgetting Z, compute max distance: diagonal+offset
maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX;
maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY;
return geo.DSO/max(geo.dVoxelX,geo.dVoxelY)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY);
}
void rollPitchYaw(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->z=-sin(geo.dPitch[i])*auxPoint.x
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
|
268f7c105f30119185b35deb72fa158f36125cf4.cu
|
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
* This file has the necessary functions to perform the X-ray CBCT projection
* operation given a geometry, angles and an image. It uses the 3D texture
* memory linear interpolation to uniformly sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "ray_interpolated_projection.hpp"
#include "mex.h"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
// Declare the texture reference.
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
__global__ void kernelPixelDetector( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin,
float maxdist){
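// Each thread integrates the volume along the ray from the source to its detector pixel, stepping through the 3D texture at the requested accuracy.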
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
float length=sqrt((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z));
//now length is the integer number of samples required on this line
length=ceil(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -source.x)/(length);
vectY=(P.y -source.y)/(length);
vectZ=(P.z -source.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((geo.DSO/min(geo.dVoxelX,geo.dVoxelY)+maxdist)/geo.accuracy < length)
length=ceil((geo.DSO/min(geo.dVoxelX,geo.dVoxelY)+maxdist)/geo.accuracy);
//Length is not actually a length, but the number of memory reads at the given accuracy ("samples per voxel")
for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+source.x;
ty=vectY*i+source.y;
tz=vectZ*i+source.z;
sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time.
}
float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
int interpolation_projection(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){
// copy data to CUDA memory
cudaArray *d_imagedata = 0;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
if (geo.accuracy>1){
tex.filterMode = cudaFilterModePoint;
geo.accuracy=1;
}
else
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
cudaMalloc((void**)&dProjection, num_bytes);
cudaMemset(dProjection,0,num_bytes);
cudaCheckErrors("cudaMalloc fail");
// If we are going to time
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
int divU,divV;
divU=8;
divV=8;
dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,1);
Point3D source, deltaU, deltaV, uvOrigin;
float maxdist;
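// For each projection angle: precompute the per-angle geometry, launch the projection kernel, and copy the resulting projection back to the host.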
for (unsigned int i=0;i<nalpha;i++){
geo.alpha=alphas[i];
//precompute distances for faster execution
maxdist=maxDistanceCubeXY(geo,geo.alpha,i);
//Precompute per angle constant stuff for speed
computeDeltas(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Interpolation!!
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
kernelPixelDetector<<<grid,block>>>(geo,dProjection, source, deltaU, deltaV, uvOrigin,floor(maxdist));
cudaCheckErrors("Kernel fail");
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
}
// copy result to host
cudaMemcpy(result[i], dProjection, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy fail");
}
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dProjection);
cudaFreeArray(d_imagedata);
cudaCheckErrors("cudaFree d_imagedata fail");
cudaDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
* used to compute the locations of the X-rays. While it seems verbose and overly optimized,
* it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO;
S.y=0;
S.z=0;
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
// Now we have the real world (OXYZ) coordinates of the bottom corner and its two neighbours.
// The objective is to get a position of the detector in a coordinate system where:
// 1-units are voxel size (can be different in each direction)
// 2-The image has its first voxel at (0,0,0)
// 3-The image never rotates
// To do that, we need to compute the detector "deltas", or "by how much
// (in the new xyz) do the voxels change when an index is added". To do that,
// several geometric steps need to be applied.
//1.Roll, pitch, yaw
// The detector can have a small rotation.
// According to
//"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706.
// only the Z rotation has a big influence on image quality when the rotations are small.
// Still, all rotations are supported.
// To roll, pitch and yaw, the detector has to be centered in OXYZ.
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
//Now let's translate the points to where they should be:
P.x=P.x-(geo.DSD-geo.DSO);
Pu0.x=Pu0.x-(geo.DSD-geo.DSO);
Pv0.x=Pv0.x-(geo.DSD-geo.DSO);
//2: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z;
Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z;
Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z;
Point3D S2;
S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha);
S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha);
S2.z=S.z;
//3: Offset image (instead of offsetting the image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S2.x =S2.x+geo.sVoxelX/2-geo.dVoxelX/2; S2.y =S2.y+geo.sVoxelY/2-geo.dVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ;
//mexPrintf("COR: %f \n",geo.COR[i]);
//5. Apply COR. Wherever everything was, it is now offset by a bit.
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S2.x+=CORx; S2.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S2;
}
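/* Illustrative sketch: the triple (uvOrigin, deltaU, deltaV) returned above defines an
 * affine map from detector pixel indices to voxel coordinates. The projection kernel
 * launched earlier presumably evaluates the equivalent expression per pixel; the helper
 * below is a hypothetical example added only to show the geometry and is not used by
 * the kernels in this file.
 */
__host__ __device__ inline Point3D detectorPixelToVoxel(Point3D uvOrigin, Point3D deltaU, Point3D deltaV, int u, int v){
    Point3D p;
    // Start at the (0,0) detector pixel and step u times along U and v times along V
    p.x = uvOrigin.x + (float)u*deltaU.x + (float)v*deltaV.x;
    p.y = uvOrigin.y + (float)u*deltaU.y + (float)v*deltaV.y;
    p.z = uvOrigin.z + (float)u*deltaU.z + (float)v*deltaV.z;
    return p;
}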
float maxDistanceCubeXY(Geometry geo, float alpha,int i){
///////////
// Compute the initial "t" so that we go out of bounds as little as possible.
//////////
float maxCubX,maxCubY;
// Ignoring Z, compute the max distance: diagonal + offset
maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX;
maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY;
return geo.DSO/max(geo.dVoxelX,geo.dVoxelY)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY);
}
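/* Note: the returned value is in voxel units. It is presumably the "maxdist" whose
 * floor() is passed to kernelPixelDetector above, i.e. a conservative number of samples
 * along the ray that can be skipped before the ray can possibly enter the image cube.
 */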
void rollPitchYaw(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z;
point->z=-sin(geo.dPitch[i])*auxPoint.x
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
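/* The transform above is the standard ZYX Euler rotation applied to the point, i.e.
 * (assuming dRoll, dPitch, dYaw are the detector misalignment angles)
 *
 *   p' = Rz(dRoll) * Ry(dPitch) * Rx(dYaw) * p
 *
 * with
 *   Rz(a) = [[cos a, -sin a, 0], [sin a, cos a, 0], [0, 0, 1]]
 *   Ry(b) = [[cos b, 0, sin b], [0, 1, 0], [-sin b, 0, cos b]]
 *   Rx(c) = [[1, 0, 0], [0, cos c, -sin c], [0, sin c, cos c]]
 *
 * Expanding the product reproduces the nine coefficients written out above.
 */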
|
e48d181367c9781653a1c7da015b8409bbdd96a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j2d9pt-512-8-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
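/* Descriptive sketch of the machine-generated (AN5D-style) code below: each thread owns one
 * column (fixed __c2) and streams down dimension __c1. Eight time steps of the 2D 9-point
 * star stencil (halo 2) are fused; stage k keeps a five-row window of its input in the
 * rotating registers __reg_k_0..__reg_k_4, while the +/-1 and +/-2 column neighbours of the
 * centre row are exchanged through the double-buffered shared-memory line __c_sb. Each
 * __LOAD pulls one new input row, each __CALCk advances stage k by one row, and __STORE
 * writes one fully updated row of the output time level back to A.
 */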
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
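// Tiles that do not touch the lower boundary: steady-state streaming loop, unrolled
// five-fold so the rotating register windows return to their starting names each
// iteration; every loaded row releases the store of the row 16 positions above it.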
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
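// Drain: at most four rows of the overlapped tile remain; keep loading and storing
// one row at a time (still 16 rows apart) until the end of the tile is reached.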
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
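// kernel0_7: one launch advances the solution by 7 fused time steps (__side0Len = 7)
// of a 9-point star stencil (radius 2 along c1 and c2). Each thread owns one column
// (c2) and streams down the rows (c1), keeping a five-row register window per
// pipeline stage; the column neighbourhood is exchanged through the double-buffered
// shared array __c_sb_double. Stages that would read outside the valid region fall
// back to a pass-through of the centre value.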
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
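// Overlapped-tiling shape: each block covers 128 rows x 484 columns of output,
// widened by __halo * __side0Len = 14 cells per side, giving 156 streamed rows and
// a 512-element working width (= __blockSize).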
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
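// Pipeline roles: __LOAD fetches one input row into a register, __CALCk applies one
// stencil time step (publishing the centre row to the block through __c_sb), and
// __STORE applies the final, seventh step and writes the result row to the other
// time slice of A. Threads whose k-th step would be invalid (__writeValidk false)
// simply forward the centre register instead.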
if (__c1Id == 0)
{
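// First tile along c1: rows 0 and 1 are the upper boundary, so they are reused
// unchanged as the upper neighbours of every pipeline stage. The prologue loads
// rows 0-16 to fill all seven stages before the first output row (row 2) is
// stored, then keeps storing one row per loaded row up to row 28.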
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
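// Tiles further down in c1: the prologue loads the 29 rows of the upper overlap
// region purely to warm up the register pipeline; only the last of them produces
// a store (row 14 of the tile).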
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
__c_sb = __c_sb_double + __blockSize * 1;
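// Pin the shared-memory double buffer to its second half before the streaming
// phase, independent of how often the prologue above toggled it.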
if (__c1Id == __side1Num - 1)
{
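// Last tile along c1: stream five rows per iteration while at least five rows of
// the tile remain, then finish the ragged bottom with one of the drain cases below.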
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
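// Between zero and four rows of the tile remain; each case reuses the bottom
// boundary rows as the lower neighbours of the still-pending pipeline stages and
// flushes the stores they were holding back.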
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
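    // Steady-state streaming: each loop iteration loads five fresh rows, advances
    // every pipelined __CALC stage, and stores the rows that have cleared all stages
    // (14 rows behind the load cursor). The guarded single-row steps after the loop
    // finish the overlapped tile once __h reaches __side1LenOl.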
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
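// kernel0_6: auto-generated (AN5D-style) streaming stencil kernel that fuses
// __side0Len = 6 time steps per sweep over a 2D grid with a halo of 2 in each
// dimension. Each thread handles one c2 index and streams along c1, keeping a
// five-row register window at each pipeline level (__reg_k_0..__reg_k_4); c2
// neighbours are exchanged through the double-buffered shared array __c_sb_double.
// Five pipelined stages (__CALC1..__CALC5) feed the final update, which __STORE
// evaluates and writes to the opposite time plane of A.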
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
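// __CALCEXPR evaluates one application of the 9-point (order-2 star) stencil:
//   out(c1,c2) = ( 7.1*in(c1-2,c2) + 5.1*in(c1-1,c2)
//                + 9.2*in(c1,c2-2) + 12.1*in(c1,c2-1) + 15*in(c1,c2)
//                + 12.2*in(c1,c2+1) + 9.1*in(c1,c2+2)
//                + 5.2*in(c1+1,c2) + 7.2*in(c1+2,c2) ) / 118
// c1 neighbours come from the register window (__a..__e), c2 neighbours from shared memory.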
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
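// Prologue: ramp the six-step pipeline over rows 0-24. On the first c1 tile the two
// physical boundary rows are loaded straight into the deepest-stage registers
// (__reg_5_0/__reg_5_1) and reused as fixed halo inputs by every stage; on all other
// tiles every row enters the pipeline through __reg_0_*.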
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
__c_sb = __c_sb_double + __blockSize * 0;
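// Main streaming phase: the last tile along c1 stops early and drains the pipeline
// with one of the tail cases below, while all other tiles stream to the end of the
// overlapped tile and return once __h reaches __side1LenOl.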
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
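    // Drain the in-flight pipeline stages. The leading `if (0) {}` is a generated
    // placeholder so every case can use the same `else if` form; the case that fires
    // matches how many rows of this tile (0-4) remain beyond __h, which lets the tail
    // be flushed without loading past the tile boundary.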
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
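    // Tiles other than the last along c1: steady-state streaming, storing 12 rows
    // behind the load cursor, followed by guarded single-row steps that bail out as
    // soon as __h reaches __side1LenOl.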
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
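// kernel0_5: same register/shared-memory streaming scheme as kernel0_6, but fusing
// __side0Len = 5 time steps (four pipelined __CALC stages ahead of the final __STORE)
// and using a correspondingly wider c2 tile (__side2Len = 492).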
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
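// Prologue, assumed to mirror kernel0_6 with one fewer pipeline stage: on the first
// c1 tile the two boundary rows stay in __reg_4_0/__reg_4_1 and feed every stage as
// fixed halo values; other tiles ramp the pipeline entirely through __reg_0_*.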
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
__c_sb = __c_sb_double + __blockSize * 1;
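// Editorial note: steady state. The last tile along c1 runs the streaming loop
// only to the end of the real domain and then flushes the register pipeline via
// the __h + k tail branches below; every other tile processes its full
// overlapped length (__side1LenOl) and returns once its rows are exhausted.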
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
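/* kernel0_4 - editorial annotation, not generator output: the same radius-2
 * star-stencil sweep with four time steps fused per launch (__side0Len = 4).
 * Each block covers a 128 x 496 spatial tile plus an 8-row/column overlap on
 * every side (__halo * __side0Len), drives a four-stage register pipeline
 * (__reg_0_* .. __reg_3_*), and double-buffers the centre row in shared memory
 * (__c_sb_double) for the +/-2 column accesses. This follows the AN5D-style
 * time-tiling scheme used by the other kernel0_* variants in this file.
 */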
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
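/* kernel0_3 - editorial annotation: the three-step variant of the same sweep
 * (__side0Len = 3). Each block covers a 128 x 500 tile with a 6-row/column
 * overlap per side, uses a three-stage register pipeline (__reg_0_* ..
 * __reg_2_*), and the same double-buffered shared-memory line buffer.
 */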
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
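/* Note (added): re-seat __c_sb before the steady-state loop. __c_sb_double holds
   two __blockSize-sized halves, and __DB_SWITCH() (called from __CALCSETUP)
   alternates between them, presumably so each evaluation writes into the half
   that the previous one did not read from. */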
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
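/* Note (added): pipeline drain for interior tiles -- the remaining rows of the
   overlapped tile are processed one at a time, returning as soon as __h reaches
   the padded tile height __side1LenOl. */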
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
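/* Note (added): kernel0_2 below is the variant that fuses two time steps per
   sweep (__side0Len = 2). One stage of intermediate registers (__reg_1_*)
   pipelines the first step while __STORE applies the second; each fused step
   consumes another __halo2 cells of overlap on each side, which is why
   __side2Len is 504 here (504 + 2*2*2 = 512 threads per block). */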
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
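/* Note (added): kernel0_1 below is the single-time-step variant
   (__side0Len = 1). No intermediate register stages are needed; each window of
   five loaded rows feeds __STORE directly, and __side2Len is 508 so that
   508 + 2*2 again gives a 512-thread block. */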
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
|
e48d181367c9781653a1c7da015b8409bbdd96a4.cu
|
#include "j2d9pt-512-8-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
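/* Note (added): kernel0_8 below is the deepest variant, fusing eight time steps
   per sweep (__side0Len = 8) with seven intermediate register stages
   (__reg_1_* .. __reg_7_*) on top of the input stage __reg_0_*. __side2Len is
   480, so 480 + 2*2*8 = 512 threads per block, which appears to match the
   "512-8-128" in the header name (problem size 512, 8 fused steps, 128-row
   tiles). The host side presumably advances c0 by eight per launch of this
   kernel; that launch code is not part of this file. */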
__global__ void kernel0_8(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
float __reg_7_0;
float __reg_7_1;
float __reg_7_2;
float __reg_7_3;
float __reg_7_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
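// Remainder handling: depending on how many rows remain in this tile
// (__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2 for k = 0..4), the
// branches below finish the partially filled stages, reusing the last loaded
// rows as clamped bottom-boundary values.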
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
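// Interior row-tiles: run the full overlapped stream; the early returns after
// the loop stop once the overlapped extent __side1LenOl has been consumed.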
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
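// kernel0_7: same streaming stencil structure as the kernels above, but fusing
// 7 time steps (__side0Len = 7) of the radius-2 star stencil per sweep over A.
// Each thread block covers a 128 x 484 interior tile, extended on each side by
// __halo * __side0Len rows/columns of overlap.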
__global__ void kernel0_7(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
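// Register pipeline: __reg_<stage>_<slot>. Stage 0 holds freshly loaded input
// rows; stages 1..6 hold intermediate time steps. The five slots per stage form
// a rotating window of 2 * __halo1 + 1 rows along dimension 1.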
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
float __reg_6_0;
float __reg_6_1;
float __reg_6_2;
float __reg_6_3;
float __reg_6_4;
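// Double-buffered shared-memory row for neighbour exchange along dimension 2:
// __CALCSETUP flips the buffer half (__DB_SWITCH) before publishing the center
// value, allowing a single __syncthreads() per stage.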
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
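// __writeValidK shrinks the valid column range by __halo2 per fused time step;
// where a stage's inputs are incomplete, __CALCk passes the center register
// through (out = reg2). Only __writeValid7 columns produce final output.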
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
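// Pipeline prologue: the first row-tile (__c1Id == 0) primes stages 1..6 using
// the two clamped top-boundary rows kept in __reg_6_0 / __reg_6_1; all other
// tiles prime the pipeline from rows inside the halo overlap.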
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
__c_sb = __c_sb_double + __blockSize * 1;
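  // Epilogue selection: the last tile along c1 (branch below) has to drain the
  // register pipeline against the bottom boundary, whereas interior tiles (the
  // else branch) simply keep streaming rows until the overlapped tile height
  // __side1LenOl has been consumed.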
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
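    // Tail cases: zero to four rows remain past the unrolled loop. Each branch
    // finishes the outstanding pipeline stages for those rows, substituting the
    // last stage-0 registers (the untouched bottom-boundary rows) wherever a
    // deeper-stage value does not exist, so no load beyond the tile is issued.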
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
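  // Interior tiles along c1: no bottom-boundary handling is required; stream
  // five rows per iteration and let the per-row checks afterwards return once
  // __side1LenOl rows have been processed.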
else
{
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
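    // Remainder rows (fewer than five) after the unrolled loop, one per step.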
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
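/* kernel0_6: variant of the same stencil kernel that fuses 6 time steps per
 * launch (__side0Len = 6). Each thread owns one column (c2) of a 128-row by
 * 488-column tile (plus 12-point overlaps on each side); rows (c1) are streamed
 * through per-thread registers, and the c2-neighbourhood of the row currently
 * being updated is exchanged via the double-buffered shared array __c_sb. Only
 * the result of the final fused step is written back, to the opposite time
 * plane of A.
 */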
__global__ void kernel0_6(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
float __reg_5_0;
float __reg_5_1;
float __reg_5_2;
float __reg_5_3;
float __reg_5_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
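  /* Macro roles for the register / shared-memory pipeline:
   *   __LOAD(reg, h)   - fetch row h of the current time plane of A into a
   *                      register (only for threads whose column is in range).
   *   __CALCk(out,...) - apply one fused time step; the result is valid only
   *                      where __writeValidk holds (the writable window narrows
   *                      by __halo2 per fused step), otherwise the centre value
   *                      is passed through unchanged.
   *   __STORE(h, ...)  - apply the last fused step and write row h to the
   *                      opposite time plane of A.
   *
   * For reference, one un-fused time step of the stencil encoded by __CALCEXPR
   * corresponds to the naive sketch below ("in"/"out" and the loop bounds are
   * illustrative names, not part of the generated code):
   *
   *   for (int c1 = 2; c1 < dimsize - 2; ++c1)
   *     for (int c2 = 2; c2 < dimsize - 2; ++c2)
   *       out[c1][c2] = (7.1f * in[c1 - 2][c2] + 5.1f * in[c1 - 1][c2]
   *                    + 9.2f * in[c1][c2 - 2] + 12.1f * in[c1][c2 - 1]
   *                    + 15.f  * in[c1][c2]    + 12.2f * in[c1][c2 + 1]
   *                    + 9.1f * in[c1][c2 + 2] + 5.2f * in[c1 + 1][c2]
   *                    + 7.2f * in[c1 + 2][c2]) / 118;
   */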
if (__c1Id == 0)
{
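    // First tile along c1: rows 0 and 1 are the physical top boundary. They are
    // read once into the stage-5 registers and reused by every pipeline stage as
    // the fixed upper-halo values; the first store targets row 2, so the
    // boundary rows themselves are never rewritten here.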
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
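  // Tiles with __c1Id > 0: re-load a 25-row prologue (rows 0..24 of this tile)
  // purely to prime the pipeline stages; only one row of final-step output is
  // stored (__STORE(12, ...)) before the streaming loops below take over.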
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
__c_sb = __c_sb_double + __blockSize * 0;
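  // Drain phase for kernel0_6: same structure as in the kernel above, with one
  // fewer pipeline stage (stores trail the loads by 12 rows instead of 14).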
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
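// Interior row tiles of this variant: keep streaming with the same unrolled
// loop; the guarded iterations after it finish the leftover rows when the
// overlapped tile height is not a multiple of the unroll factor of five,
// returning as soon as __h reaches __side1LenOl.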
else
{
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
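// kernel0_5: AN5D-style temporally blocked stencil kernel that fuses five time
// steps per launch (__side0Len = 5). Each thread owns one column (__c2) of a
// 128-row by 492-column tile plus halo overlap; rows are streamed through five
// register groups (__reg_0_* .. __reg_4_*) while a double-buffered shared-memory
// line supplies the left/right neighbours of the radius-2 star stencil.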
__global__ void kernel0_5(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
float __reg_4_3;
float __reg_4_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
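// Macro pipeline for this kernel:
//   __LOAD      - each thread reads its column of input row h from the current
//                 time plane of A (guarded by __loadValid).
//   __CALCEXPR  - the weighted 9-point star: two register neighbours above and
//                 below, two shared-memory neighbours on each side, and the
//                 centre, summed and scaled by 1/118.
//   __CALCSETUP - flips the shared-memory double buffer, publishes this thread's
//                 centre value and synchronises so horizontal neighbours are visible.
//   __CALCk     - applies stage k only where __writeValidk holds; otherwise the
//                 centre value is passed through unchanged.
//   __STORE     - performs the final (fifth) application and writes the result
//                 into the other time plane of A when __storeValid is set.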
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
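// Tiles that do not start at the top boundary (__c1Id != 0) simply preload the
// overlapped halo rows 0..20 of their tile and warm all four __CALC stages
// before the first row is stored at offset 10.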
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
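// Re-select the second half of the shared-memory double buffer so the loops
// below start from a known parity, then branch: the last tile along c1 must
// drain the register pipeline explicitly, every other tile keeps streaming.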
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
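// Pipeline drain for the last row tile: the remaining 0..4 rows of the tile are
// loaded (if any), the deeper pipeline stages are completed, and the final rows
// are written with __STORE up to the tile edge.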
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
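// Interior row tiles: continue the unrolled streaming loop and finish any
// leftover rows with the guarded iterations below, which return once __h
// reaches __side1LenOl.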
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
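// kernel0_4: the same generated stencil specialised for four fused time steps
// (__side0Len = 4). Three __CALC stages feed the final __STORE, and the column
// tile widens to 496 so that, with the 8-column overlap on each side, the block
// still maps to 512 threads.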
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_3_3;
float __reg_3_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
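// Same macro pipeline as kernel0_5, one stage shorter: __writeValid1..3 gate the
// intermediate stages and __writeValid4 (__storeValid) gates the final store.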
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
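/* kernel0_3 (below): the same radius-2 cross stencil as the preceding kernel, fusing three
   time steps per sweep (__side0Len = 3). Each block of __side2LenOl (= 512) threads streams a
   128 x 500 interior tile along dimension 1, keeping five rows per pipeline stage in registers
   (__reg_0_* .. __reg_2_*) and the centre row of each stage in double-buffered shared memory
   for the +/-2 column neighbours. Auto-generated code; the constants below define the tiling. */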
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_2_3;
float __reg_2_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
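/* kernel0_2 (below): two fused time steps (__side0Len = 2) of the same stencil over a
   128 x 504 interior tile; two register pipeline stages (__reg_0_*, __reg_1_*), with the
   centre row of each stage kept in double-buffered shared memory. */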
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
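/* kernel0_1 (below): a single time step (__side0Len = 1) of the same stencil over a
   128 x 508 interior tile; the five streamed rows live in __reg_0_* and the update is
   applied directly at __STORE. */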
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
__shared__ float __c_sb_double[__blockSize * 2];
float *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
|
17ef1ca2925ca42c2be0116087d670368ba2bb24.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "chckfnsCuda.cuh"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <chrono>
#define ORDER 9
using namespace std;
const int ntpb = 9; // 9 threads per block
// __managed__ so the grids are visible to both the host solver and the kernels below
// (the original host-only globals cannot be read from device code).
__managed__ int sudoku[ORDER][ORDER] = { 0 };
__managed__ int isClueGiven[ORDER][ORDER] = { 0 };
__managed__ int prevPosition[ORDER][ORDER][2];
__device__ __host__ int placeNum(int row, int column);
void reportTime(const char* msg, chrono::steady_clock::duration span);
void print(int matrix[ORDER][ORDER]) //host code use only
{
for (int i = 0; i < ORDER; i++) {
for (int j = 0; j < ORDER; j++)
cout << matrix[i][j] << " ";
cout << endl;
}
cout << endl;
return;
}
//kernel 1 - store position
__global__ void storePositions() //kernel
{
int temprow, tempcolumn;
temprow = -1;
tempcolumn = -1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ORDER || y >= ORDER) return; // guard against launches larger than the 9x9 grid
if (isClueGiven[x][y] == 0) {
prevPosition[x][y][0] = temprow;
prevPosition[x][y][1] = tempcolumn;
temprow = x;
tempcolumn = y;
}
}
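// Backtracking helpers (callable from host or device): placeNum() tries the next
// admissible digit for a cell, and goBack() clears the cell and rewinds to the
// previously recorded empty cell in prevPosition when no digit fits.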
__device__ __host__ int goBack(int &row, int &column)
{
int trow, tcolumn;
if (row == 0 && column == 0)
return 0;
sudoku[row][column] = 0;
trow = prevPosition[row][column][0];
tcolumn = prevPosition[row][column][1];
tcolumn -= 1;
row = trow;
column = tcolumn;
return 1;
}
__device__ __host__ int placeNum(int row, int column)
{
if (isClueGiven[row][column] == 1)
return 1;
for (int num = sudoku[row][column] + 1; num <= 9; num++) {
if (checkRow(row, num) && checkColumn(column, num) && checkSquare(row, column, num)) {
sudoku[row][column] = num;
return 1;
}
}
sudoku[row][column] = 0;
return 0;
}
__device__ __host__ int solveSudoku(){
for (int row = 0; row < 9; row++) {
for (int column = 0; column < 9; column++) {
if (!placeNum(row, column)) {
sudoku[row][column] = 0;
if (!goBack(row, column))
return 0;
}
}
}
return 1;
}
int main(int argc, char* argv[])
{
fstream file;
chrono::steady_clock::time_point ts, te;
const int nblks = 9; // fixed at 9: the Sudoku grid is 9x9, one block per row
if (argc == 2)
{
file.open(argv[1], ios::in);
if (file.is_open())
{
for (int row = 0; row < ORDER; row++) {
for (int column = 0; column < ORDER; column++) {
file >> sudoku[row][column];
if (sudoku[row][column] != 0)
isClueGiven[row][column] = 1;
}
}
print(sudoku);
}
else
cout << "Could not locate file ' " << argv[1] << "'. Enter elements manually" << endl;
}
if (argc > 2)
cout << "More than one arguments. Enter elements manually\n";
if (!file.is_open()) {
cout << "Enter 81 elements (0s for cells without clues) :" << endl;
for (int row = 0; row < ORDER; row++) {
for (int column = 0; column < ORDER; column++) {
cin >> sudoku[row][column];
if (sudoku[row][column] != 0)
isClueGiven[row][column] = 1;
}
}
print(sudoku);
}
ts = chrono::steady_clock::now();
// launch with one block per row (x) and one thread per column (y) so the kernel's
// 2D indexing covers the 9x9 grid; synchronize so the timing includes kernel completion
hipLaunchKernelGGL(storePositions, dim3(nblks, 1), dim3(1, ntpb), 0, 0);
hipDeviceSynchronize();
te = chrono::steady_clock::now();
reportTime("Position storage time:", te - ts);
ts = chrono::steady_clock::now();
solveSudoku();
te = chrono::steady_clock::now();
reportTime("Time to solve:", te - ts);
print(sudoku);
return 0;
}
// report system time
//
void reportTime(const char* msg, chrono::steady_clock::duration span) {
auto ms = chrono::duration_cast<chrono::milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
std::cout << "" << std::endl;
}
|
17ef1ca2925ca42c2be0116087d670368ba2bb24.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "chckfnsCuda.cuh"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <chrono>
#define ORDER 9
using namespace std;
const int ntpb = 9; // 9 threads per block
// __managed__ so the grids are visible to both the host solver and the kernels below
// (the original host-only globals cannot be read from device code).
__managed__ int sudoku[ORDER][ORDER] = { 0 };
__managed__ int isClueGiven[ORDER][ORDER] = { 0 };
__managed__ int prevPosition[ORDER][ORDER][2];
__device__ __host__ int placeNum(int row, int column);
void reportTime(const char* msg, chrono::steady_clock::duration span);
void print(int matrix[ORDER][ORDER]) //host code use only
{
for (int i = 0; i < ORDER; i++) {
for (int j = 0; j < ORDER; j++)
cout << matrix[i][j] << " ";
cout << endl;
}
cout << endl;
return;
}
//kernel 1 - store position
__global__ void storePositions() //kernel
{
int temprow, tempcolumn;
temprow = -1;
tempcolumn = -1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ORDER || y >= ORDER) return; // guard against launches larger than the 9x9 grid
if (isClueGiven[x][y] == 0) {
prevPosition[x][y][0] = temprow;
prevPosition[x][y][1] = tempcolumn;
temprow = x;
tempcolumn = y;
}
}
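// Backtracking helpers (callable from host or device): placeNum() tries the next
// admissible digit for a cell, and goBack() clears the cell and rewinds to the
// previously recorded empty cell in prevPosition when no digit fits.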
__device__ __host__ int goBack(int &row, int &column)
{
int trow, tcolumn;
if (row == 0 && column == 0)
return 0;
sudoku[row][column] = 0;
trow = prevPosition[row][column][0];
tcolumn = prevPosition[row][column][1];
tcolumn -= 1;
row = trow;
column = tcolumn;
return 1;
}
__device__ __host__ int placeNum(int row, int column)
{
if (isClueGiven[row][column] == 1)
return 1;
for (int num = sudoku[row][column] + 1; num <= 9; num++) {
if (checkRow(row, num) && checkColumn(column, num) && checkSquare(row, column, num)) {
sudoku[row][column] = num;
return 1;
}
}
sudoku[row][column] = 0;
return 0;
}
__device__ __host__ int solveSudoku(){
for (int row = 0; row < 9; row++) {
for (int column = 0; column < 9; column++) {
if (!placeNum(row, column)) {
sudoku[row][column] = 0;
if (!goBack(row, column))
return 0;
}
}
}
return 1;
}
int main(int argc, char* argv[])
{
fstream file;
chrono::steady_clock::time_point ts, te;
const int nblks = 9; // fixed at 9: the Sudoku grid is 9x9, one block per row
if (argc == 2)
{
file.open(argv[1], ios::in);
if (file.is_open())
{
for (int row = 0; row < ORDER; row++) {
for (int column = 0; column < ORDER; column++) {
file >> sudoku[row][column];
if (sudoku[row][column] != 0)
isClueGiven[row][column] = 1;
}
}
print(sudoku);
}
else
cout << "Could not locate file ' " << argv[1] << "'. Enter elements manually" << endl;
}
if (argc > 2)
cout << "More than one argument given. Enter elements manually.\n";
if (!file.is_open()) {
cout << "Enter 81 elements (0s for cells without clues):" << endl;
for (int row = 0; row < ORDER; row++) {
for (int column = 0; column < ORDER; column++) {
cin >> sudoku[row][column];
if (sudoku[row][column] != 0)
isClueGiven[row][column] = 1;
}
}
print(sudoku);
}
ts = chrono::steady_clock::now();
storePositions<<<nblks, dim3(ORDER, ORDER)>>>();
cudaDeviceSynchronize();
te = chrono::steady_clock::now();
reportTime("Position storage time:", te - ts);
ts = chrono::steady_clock::now();
solveSudoku();
te = chrono::steady_clock::now();
reportTime("Time to solve:", te - ts);
print(sudoku);
return 0;
}
// report system time
//
void reportTime(const char* msg, chrono::steady_clock::duration span) {
auto ms = chrono::duration_cast<chrono::milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
std::cout << "" << std::endl;
}
|
eaf86115cad70c52eb6258a159a2d0ff43a0b78f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Discrete Sine Transform, row-wise (DST-II)
* DST_II_Row
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_II_Row(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_II_Row.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTII_Row_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
//Bs[threadIdx.y][threadIdx.x] = cos(((2 * (threadIdx.y + k*TILE_DIM) + 1) / (2.0 * numAColumns))*PI_d*Col)*sqrt(1.0 / (1 + DELTA(Col + 1, 1)))*sqrt(2.0 / numAColumns);
if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = sin((((threadIdx.y + k*TILE_DIM) + 0.5)*PI_d*(Col + 1)) / (numAColumns))*sqrt((2.0 - DELTA(Col + 1, numAColumns)) / (numAColumns)); }
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
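/* Worked form of what the kernel above computes: for each row x of A (row length N = numAColumns)
 *
 *   X[k] = sqrt((2 - DELTA(k+1, N)) / N) * sum_{m=0..N-1} x[m] * sin(pi * (m + 0.5) * (k + 1) / N)
 *
 * i.e. a DST-II with the normalisation encoded by DELTA, realised as the tiled matrix product
 * A * B where B[m][k] holds the sine term times the scale factor (see the Bs[][] assignment). */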
// DST-II along rows, computed as a tiled matrix product - host code
// (the kernel bounds-checks each tile load, so matrix dimensions need not be multiples of TILE_DIM)
extern "C" void CalculateTransformDSTRowTwo(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//float * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTII_Row_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
|
eaf86115cad70c52eb6258a159a2d0ff43a0b78f.cu
|
/*
* Discrete Sine Transform, row-wise (DST-II)
* DST_II_Row
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_II_Row(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_II_Row.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTII_Row_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
//Bs[threadIdx.y][threadIdx.x] = cos(((2 * (threadIdx.y + k*TILE_DIM) + 1) / (2.0 * numAColumns))*PI_d*Col)*sqrt(1.0 / (1 + DELTA(Col + 1, 1)))*sqrt(2.0 / numAColumns);
if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = sin((((threadIdx.y + k*TILE_DIM) + 0.5)*PI_d*(Col + 1)) / (numAColumns))*sqrt((2.0 - DELTA(Col + 1, numAColumns)) / (numAColumns)); }
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// DST-II along rows, computed as a tiled matrix product - host code
// (the kernel bounds-checks each tile load, so matrix dimensions need not be multiples of TILE_DIM)
extern "C" void CalculateTransformDSTRowTwo(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//float * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTII_Row_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
|
ffbff4b7b66ee50106ba60c0b007eef1e4523596.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "rgbToV210.h"
__global__ static void rgbToV210Kernel(uint16_t *pSrc, uint16_t *pDst, int nSrcWidth, int nHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint3 rgb;
uint4 pF;
int nDstW = nSrcWidth / 18;
int nDstH = nHeight;
if (tid < nDstW && tidd < nDstH) {
int k = tid * 18;
int j = tidd * nSrcWidth;
rgb.x = pSrc[j + k + 0];
rgb.y = pSrc[j + k + 1];
rgb.z = pSrc[j + k + 2];
y0 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
u0 = (512000 - rgb.x * 169 - rgb.y * 332 + rgb.z * 500) / 1000;
v0 = (512000 + rgb.x * 500 - rgb.y * 419 - rgb.z * 81) / 1000;
rgb.x = pSrc[j + k + 3];
rgb.y = pSrc[j + k + 4];
rgb.z = pSrc[j + k + 5];
y1 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
u1 = (512000 - rgb.x * 169 - rgb.y * 332 + rgb.z * 500) / 1000;
v1 = (512000 + rgb.x * 500 - rgb.y * 419 - rgb.z * 81) / 1000;
rgb.x = pSrc[j + k + 6];
rgb.y = pSrc[j + k + 7];
rgb.z = pSrc[j + k + 8];
y2 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
u2 = (512000 - rgb.x * 169 - rgb.y * 332 + rgb.z * 500) / 1000;
v2 = (512000 + rgb.x * 500 - rgb.y * 419 - rgb.z * 81) / 1000;
rgb.x = pSrc[j + k + 9];
rgb.y = pSrc[j + k + 10];
rgb.z = pSrc[j + k + 11];
y3 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
rgb.x = pSrc[j + k + 12];
rgb.y = pSrc[j + k + 13];
rgb.z = pSrc[j + k + 14];
y4 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
rgb.x = pSrc[j + k + 15];
rgb.y = pSrc[j + k + 16];
rgb.z = pSrc[j + k + 17];
y5 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
pF.x = (v0 << 20) | (y0 << 10) | u0;
pF.y = (y2 << 20) | (u1 << 10) | y1;
pF.z = (u2 << 20) | (y3 << 10) | v1;
pF.w = (y5 << 20) | (v2 << 10) | y4;
k = tid * 8;
j *= 4;
j /= 9;
pDst[j + k + 0] = (uint32_t)(pF.x & 0x0000FFFF);
pDst[j + k + 1] = (uint32_t)(pF.x >> 16);
pDst[j + k + 2] = (uint32_t)(pF.y & 0x0000FFFF);
pDst[j + k + 3] = (uint32_t)(pF.y >> 16);
pDst[j + k + 4] = (uint32_t)(pF.z & 0x0000FFFF);
pDst[j + k + 5] = (uint32_t)(pF.z >> 16);
pDst[j + k + 6] = (uint32_t)(pF.w & 0x0000FFFF);
pDst[j + k + 7] = (uint32_t)(pF.w >> 16);
}
}
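/* Packing layout implied by the shifts above: each group of 6 input RGB pixels (18 samples) is
 * converted to Y/Cb/Cr and packed, 10 bits per sample, into four 32-bit words
 *   pF.x = Cr0 | Y0 | Cb0     pF.y = Y2 | Cb1 | Y1
 *   pF.z = Cb2 | Y3 | Cr1     pF.w = Y5 | Cr2 | Y4
 * (bit ranges 29..20 | 19..10 | 9..0), which are then written out as eight 16-bit halves of pDst. */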
void rgbToV210(uint16_t *dpSrc, uint16_t *dpDst, int nPitch, int nHeight, hipStream_t stream) {
dim3 blocks(16, 16, 1);
dim3 grids((nPitch + blocks.x - 1) / blocks.x, (nHeight + blocks.y - 1) / blocks.y, 1);
rgbToV210Kernel << < grids, blocks, 0, stream >> > (dpSrc, dpDst, nPitch, nHeight);
}
|
ffbff4b7b66ee50106ba60c0b007eef1e4523596.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "rgbToV210.h"
__global__ static void rgbToV210Kernel(uint16_t *pSrc, uint16_t *pDst, int nSrcWidth, int nHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint3 rgb;
uint4 pF;
int nDstW = nSrcWidth / 18;
int nDstH = nHeight;
if (tid < nDstW && tidd < nDstH) {
int k = tid * 18;
int j = tidd * nSrcWidth;
rgb.x = pSrc[j + k + 0];
rgb.y = pSrc[j + k + 1];
rgb.z = pSrc[j + k + 2];
y0 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
u0 = (512000 - rgb.x * 169 - rgb.y * 332 + rgb.z * 500) / 1000;
v0 = (512000 + rgb.x * 500 - rgb.y * 419 - rgb.z * 81) / 1000;
rgb.x = pSrc[j + k + 3];
rgb.y = pSrc[j + k + 4];
rgb.z = pSrc[j + k + 5];
y1 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
u1 = (512000 - rgb.x * 169 - rgb.y * 332 + rgb.z * 500) / 1000;
v1 = (512000 + rgb.x * 500 - rgb.y * 419 - rgb.z * 81) / 1000;
rgb.x = pSrc[j + k + 6];
rgb.y = pSrc[j + k + 7];
rgb.z = pSrc[j + k + 8];
y2 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
u2 = (512000 - rgb.x * 169 - rgb.y * 332 + rgb.z * 500) / 1000;
v2 = (512000 + rgb.x * 500 - rgb.y * 419 - rgb.z * 81) / 1000;
rgb.x = pSrc[j + k + 9];
rgb.y = pSrc[j + k + 10];
rgb.z = pSrc[j + k + 11];
y3 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
rgb.x = pSrc[j + k + 12];
rgb.y = pSrc[j + k + 13];
rgb.z = pSrc[j + k + 14];
y4 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
rgb.x = pSrc[j + k + 15];
rgb.y = pSrc[j + k + 16];
rgb.z = pSrc[j + k + 17];
y5 = (rgb.x * 299 + rgb.y * 587 + rgb.z * 114) / 1000;
pF.x = (v0 << 20) | (y0 << 10) | u0;
pF.y = (y2 << 20) | (u1 << 10) | y1;
pF.z = (u2 << 20) | (y3 << 10) | v1;
pF.w = (y5 << 20) | (v2 << 10) | y4;
k = tid * 8;
j *= 4;
j /= 9;
pDst[j + k + 0] = (uint32_t)(pF.x & 0x0000FFFF);
pDst[j + k + 1] = (uint32_t)(pF.x >> 16);
pDst[j + k + 2] = (uint32_t)(pF.y & 0x0000FFFF);
pDst[j + k + 3] = (uint32_t)(pF.y >> 16);
pDst[j + k + 4] = (uint32_t)(pF.z & 0x0000FFFF);
pDst[j + k + 5] = (uint32_t)(pF.z >> 16);
pDst[j + k + 6] = (uint32_t)(pF.w & 0x0000FFFF);
pDst[j + k + 7] = (uint32_t)(pF.w >> 16);
}
}
void rgbToV210(uint16_t *dpSrc, uint16_t *dpDst, int nPitch, int nHeight, cudaStream_t stream) {
dim3 blocks(16, 16, 1);
dim3 grids((nPitch + blocks.x - 1) / blocks.x, (nHeight + blocks.y - 1) / blocks.y, 1);
rgbToV210Kernel << < grids, blocks, 0, stream >> > (dpSrc, dpDst, nPitch, nHeight);
}
|
93e1549c0d833f5cc07e10ffa135ce0b3042485e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <[email protected]>
* Copyright 2018 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include <hipcub/hipcub.hpp>
#include <cudf/copying.hpp>
#include <cudf/replace.hpp>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <cudf/types.hpp>
#include <utilities/error_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/cudf_utils.h>
#include <utilities/cuda_utils.hpp>
#include <utilities/column_utils.hpp>
#include <bitmask/legacy/legacy_bitmask.hpp>
#include <bitmask/legacy/bit_mask.cuh>
using bit_mask::bit_mask_t;
namespace{ //anonymous
static constexpr int warp_size = 32;
static constexpr int BLOCK_SIZE = 256;
// returns the block_sum using the given shared array of warp sums.
template <typename T>
__device__ T sum_warps(T* warp_smem)
{
T block_sum = 0;
if (threadIdx.x < warp_size) {
T my_warp_sum = warp_smem[threadIdx.x];
__shared__ typename hipcub::WarpReduce<T>::TempStorage temp_storage;
block_sum = hipcub::WarpReduce<T>(temp_storage).Sum(my_warp_sum);
}
return block_sum;
}
// return the new_value for output column at index `idx`
template<class T, bool replacement_has_nulls>
__device__ auto get_new_value(gdf_size_type idx,
const T* __restrict__ input_data,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
bit_mask_t const * __restrict__ replacement_valid)
{
auto found_ptr = thrust::find(thrust::seq, values_to_replace_begin,
values_to_replace_end, input_data[idx]);
T new_value{0};
bool output_is_valid{true};
if (found_ptr != values_to_replace_end) {
auto d = thrust::distance(values_to_replace_begin, found_ptr);
new_value = d_replacement_values[d];
if (replacement_has_nulls) {
output_is_valid = bit_mask::is_valid(replacement_valid, d);
}
} else {
new_value = input_data[idx];
}
return thrust::make_pair(new_value, output_is_valid);
}
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel that replaces elements from `output_data` given the following
* rule: replace all `values_to_replace[i]` in [values_to_replace_begin`,
* `values_to_replace_end`) present in `output_data` with `d_replacement_values[i]`.
*
* @tparam input_has_nulls `true` if the input column has a valid mask, `false` otherwise
* @tparam replacement_has_nulls `true` if the replacement_values column has a valid mask, `false` otherwise
* The input_has_nulls and replacement_has_nulls template parameters allow us to specialize
* this kernel for the different scenarios for performance without writing different kernels.
*
* @param[in] input_data Device array with the data to be modified
* @param[in] input_valid Valid mask associated with input_data
* @param[out] output_data Device array to store the data from input_data
* @param[out] output_valid Valid mask associated with output_data
* @param[out] output_valid_count #valid in output column
* @param[in] nrows # rows in `output_data`
* @param[in] values_to_replace_begin Device pointer to the beginning of the sequence
* of old values to be replaced
* @param[in] values_to_replace_end Device pointer to the end of the sequence
* of old values to be replaced
* @param[in] d_replacement_values Device array with the new values
* @param[in] replacement_valid Valid mask associated with d_replacement_values
*
* @returns
*/
/* ----------------------------------------------------------------------------*/
template <class T,
bool input_has_nulls, bool replacement_has_nulls>
__global__
void replace_kernel(const T* __restrict__ input_data,
bit_mask_t const * __restrict__ input_valid,
T * __restrict__ output_data,
bit_mask_t * __restrict__ output_valid,
gdf_size_type * __restrict__ output_valid_count,
gdf_size_type nrows,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
bit_mask_t const * __restrict__ replacement_valid)
{
gdf_size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
__shared__ uint32_t valid_sum[warp_size];
// init shared memory for block valid counts
if (input_has_nulls or replacement_has_nulls){
if(threadIdx.x < warp_size) valid_sum[threadIdx.x] = 0;
__syncthreads();
}
while (i < nrows) {
bool output_is_valid = true;
uint32_t bitmask = 0xffffffff;
if (input_has_nulls) {
bool const input_is_valid{bit_mask::is_valid(input_valid, i)};
output_is_valid = input_is_valid;
bitmask = __ballot_sync(active_mask, input_is_valid);
if (input_is_valid) {
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i, input_data,
values_to_replace_begin,
values_to_replace_end,
d_replacement_values,
replacement_valid);
}
} else {
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i, input_data,
values_to_replace_begin,
values_to_replace_end,
d_replacement_values,
replacement_valid);
}
/* output valid counts calculations*/
if (input_has_nulls or replacement_has_nulls){
bitmask &= __ballot_sync(active_mask, output_is_valid);
if(0 == (threadIdx.x % warp_size)){
output_valid[(int)(i/warp_size)] = bitmask;
valid_sum[(int)(threadIdx.x / warp_size)] += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if(input_has_nulls or replacement_has_nulls){
__syncthreads(); // waiting for the valid counts of each warp to be ready
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = sum_warps<uint32_t>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x < warp_size && 0 == (threadIdx.x % warp_size)) {
atomicAdd(output_valid_count, block_valid_count);
}
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_kernel_forwarder {
template <typename col_type>
void operator()(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values,
gdf_column &output,
hipStream_t stream = 0)
{
const bool input_has_nulls = cudf::has_nulls(input_col);
const bool replacement_has_nulls = cudf::has_nulls(replacement_values);
const bit_mask_t* __restrict__ typed_input_valid =
reinterpret_cast<bit_mask_t*>(input_col.valid);
const bit_mask_t* __restrict__ typed_replacement_valid =
reinterpret_cast<bit_mask_t*>(replacement_values.valid);
bit_mask_t* __restrict__ typed_out_valid =
reinterpret_cast<bit_mask_t*>(output.valid);
gdf_size_type *valid_count = nullptr;
if (typed_out_valid != nullptr) {
RMM_ALLOC(&valid_count, sizeof(gdf_size_type), stream);
CUDA_TRY(hipMemsetAsync(valid_count, 0, sizeof(gdf_size_type), stream));
}
col_type const * values_to_replace_ptr{ cudf::get_data<col_type const>(values_to_replace) };
cudf::util::cuda::grid_config_1d grid{output.size, BLOCK_SIZE, 1};
auto replace = replace_kernel<col_type, true, true>;
if (input_has_nulls){
if (replacement_has_nulls){
replace = replace_kernel<col_type, true, true>;
}else{
replace = replace_kernel<col_type, true, false>;
}
}else{
if (replacement_has_nulls){
replace = replace_kernel<col_type, false, true>;
}else{
replace = replace_kernel<col_type, false, false>;
}
}
hipLaunchKernelGGL(( replace), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream,
static_cast<const col_type*>(input_col.data),
typed_input_valid,
static_cast<col_type*>(output.data),
typed_out_valid,
valid_count,
output.size,
values_to_replace_ptr,
values_to_replace_ptr + replacement_values.size,
static_cast<const col_type*>(replacement_values.data),
typed_replacement_valid);
if(typed_out_valid != nullptr){
gdf_size_type valids {0};
CUDA_TRY(hipMemcpyAsync(&valids, valid_count,
sizeof(gdf_size_type), hipMemcpyDefault, stream));
output.null_count = output.size - valids;
RMM_FREE(valid_count, stream);
}
}
};
} //end anonymous namespace
namespace cudf{
namespace detail {
gdf_column find_and_replace_all(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values,
hipStream_t stream = 0) {
if (0 == input_col.size )
{
return cudf::empty_like(input_col);
}
if (0 == values_to_replace.size || 0 == replacement_values.size)
{
return cudf::copy(input_col, stream);
}
CUDF_EXPECTS(values_to_replace.size == replacement_values.size,
"values_to_replace and replacement_values size mismatch.");
CUDF_EXPECTS(input_col.dtype == values_to_replace.dtype &&
input_col.dtype == replacement_values.dtype,
"Columns type mismatch.");
CUDF_EXPECTS(input_col.data != nullptr, "Null input data.");
CUDF_EXPECTS(values_to_replace.data != nullptr && replacement_values.data != nullptr,
"Null replace data.");
CUDF_EXPECTS(values_to_replace.valid == nullptr || values_to_replace.null_count == 0,
"Nulls are in values_to_replace column.");
gdf_column output = cudf::allocate_like(input_col, RETAIN, stream);
if (nullptr == input_col.valid && replacement_values.valid != nullptr) {
gdf_valid_type *valid = nullptr;
gdf_size_type bytes = gdf_valid_allocation_size(input_col.size);
RMM_ALLOC(&valid, bytes, stream);
CUDA_TRY(hipMemsetAsync(valid, 0, bytes, stream));
CUDF_EXPECTS(GDF_SUCCESS == gdf_column_view(&output, output.data, valid,
input_col.size, input_col.dtype),
"cudf::replace failed to add valid mask to output col.");
}
cudf::type_dispatcher(input_col.dtype, replace_kernel_forwarder{},
input_col,
values_to_replace,
replacement_values,
output,
stream);
CHECK_STREAM(stream);
return output;
}
} //end details
/* --------------------------------------------------------------------------*/
/**
* @brief Replace elements from `input_col` according to the mapping `values_to_replace` to
* `replacement_values`, that is, replace all `values_to_replace[i]` present in `input_col`
* with `replacement_values[i]`.
*
* @param[in] col gdf_column with the data to be modified
* @param[in] values_to_replace gdf_column with the old values to be replaced
* @param[in] replacement_values gdf_column with the new values
*
* @returns output gdf_column with the modified data
*/
/* ----------------------------------------------------------------------------*/
gdf_column find_and_replace_all(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values){
return detail::find_and_replace_all(input_col, values_to_replace, replacement_values);
}
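/* Minimal host-side usage sketch (illustrative only, not part of the original file; the column
 * names and their dtype are assumptions): given three device-resident gdf_columns of the same
 * dtype - `input`, `old_vals` and `new_vals`, with old_vals[i] mapping to new_vals[i] -
 *
 *   gdf_column replaced = cudf::find_and_replace_all(input, old_vals, new_vals);
 *
 * returns a freshly allocated column with the substitutions applied; `input` is left unmodified. */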
} //end cudf
namespace{ //anonymous
using bit_mask::bit_mask_t;
template <typename Type>
__global__
void replace_nulls_with_scalar(gdf_size_type size,
const Type* __restrict__ in_data,
const bit_mask_t* __restrict__ in_valid,
const Type* __restrict__ replacement,
Type* __restrict__ out_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (int i=start; i<size; i+=step) {
out_data[i] = bit_mask::is_valid(in_valid, i)? in_data[i] : *replacement;
}
}
template <typename Type>
__global__
void replace_nulls_with_column(gdf_size_type size,
const Type* __restrict__ in_data,
const bit_mask_t* __restrict__ in_valid,
const Type* __restrict__ replacement,
Type* __restrict__ out_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (int i=start; i<size; i+=step) {
out_data[i] = bit_mask::is_valid(in_valid, i)? in_data[i] : replacement[i];
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_nulls_column_kernel_forwarder {
template <typename col_type>
void operator()(gdf_size_type nrows,
void* d_in_data,
gdf_valid_type* d_in_valid,
const void* d_replacement,
void* d_out_data,
hipStream_t stream = 0)
{
cudf::util::cuda::grid_config_1d grid{nrows, BLOCK_SIZE};
hipLaunchKernelGGL(( replace_nulls_with_column), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, nrows,
static_cast<const col_type*>(d_in_data),
reinterpret_cast<bit_mask_t*>(d_in_valid),
static_cast<const col_type*>(d_replacement),
static_cast<col_type*>(d_out_data)
);
}
};
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_nulls_scalar_kernel_forwarder {
template <typename col_type>
void operator()(gdf_size_type nrows,
void* d_in_data,
gdf_valid_type* d_in_valid,
const void* replacement,
void* d_out_data,
hipStream_t stream = 0)
{
cudf::util::cuda::grid_config_1d grid{nrows, BLOCK_SIZE};
auto t_replacement = static_cast<const col_type*>(replacement);
col_type* d_replacement = nullptr;
RMM_TRY(RMM_ALLOC(&d_replacement, sizeof(col_type), stream));
CUDA_TRY(hipMemcpyAsync(d_replacement, t_replacement, sizeof(col_type),
hipMemcpyHostToDevice, stream));
hipLaunchKernelGGL(( replace_nulls_with_scalar), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream, nrows,
static_cast<const col_type*>(d_in_data),
reinterpret_cast<bit_mask_t*>(d_in_valid),
static_cast<const col_type*>(d_replacement),
static_cast<col_type*>(d_out_data)
);
RMM_TRY(RMM_FREE(d_replacement, stream));
}
};
} //end anonymous namespace
namespace cudf {
namespace detail {
gdf_column replace_nulls(const gdf_column& input,
const gdf_column& replacement,
hipStream_t stream)
{
if (input.size == 0) {
return cudf::empty_like(input);
}
CUDF_EXPECTS(nullptr != input.data, "Null input data");
if (input.null_count == 0 || input.valid == nullptr) {
return cudf::copy(input);
}
CUDF_EXPECTS(input.dtype == replacement.dtype, "Data type mismatch");
CUDF_EXPECTS(replacement.size == 1 || replacement.size == input.size, "Column size mismatch");
CUDF_EXPECTS(nullptr != replacement.data, "Null replacement data");
CUDF_EXPECTS(nullptr == replacement.valid || 0 == replacement.null_count,
"Invalid replacement data");
gdf_column output = cudf::allocate_like(input, NEVER, stream);
cudf::type_dispatcher(input.dtype, replace_nulls_column_kernel_forwarder{},
input.size,
input.data,
input.valid,
replacement.data,
output.data,
stream);
return output;
}
gdf_column replace_nulls(const gdf_column& input,
const gdf_scalar& replacement,
hipStream_t stream)
{
if (input.size == 0) {
return cudf::empty_like(input);
}
CUDF_EXPECTS(nullptr != input.data, "Null input data");
if (input.null_count == 0 || input.valid == nullptr) {
return cudf::copy(input);
}
CUDF_EXPECTS(input.dtype == replacement.dtype, "Data type mismatch");
CUDF_EXPECTS(true == replacement.is_valid, "Invalid replacement data");
gdf_column output = cudf::allocate_like(input, NEVER, stream);
cudf::type_dispatcher(input.dtype, replace_nulls_scalar_kernel_forwarder{},
input.size,
input.data,
input.valid,
&(replacement.data),
output.data);
return output;
}
} // namespace detail
gdf_column replace_nulls(const gdf_column& input,
const gdf_column& replacement)
{
return detail::replace_nulls(input, replacement, 0);
}
gdf_column replace_nulls(const gdf_column& input,
const gdf_scalar& replacement)
{
return detail::replace_nulls(input, replacement, 0);
}
} // namespace cudf
|
93e1549c0d833f5cc07e10ffa135ce0b3042485e.cu
|
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Cristhian Alberto Gonzales Castillo <[email protected]>
* Copyright 2018 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_ptr.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include <cub/cub.cuh>
#include <cudf/copying.hpp>
#include <cudf/replace.hpp>
#include <cudf/cudf.h>
#include <rmm/rmm.h>
#include <cudf/types.hpp>
#include <utilities/error_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/cudf_utils.h>
#include <utilities/cuda_utils.hpp>
#include <utilities/column_utils.hpp>
#include <bitmask/legacy/legacy_bitmask.hpp>
#include <bitmask/legacy/bit_mask.cuh>
using bit_mask::bit_mask_t;
namespace{ //anonymous
static constexpr int warp_size = 32;
static constexpr int BLOCK_SIZE = 256;
// returns the block_sum using the given shared array of warp sums.
template <typename T>
__device__ T sum_warps(T* warp_smem)
{
T block_sum = 0;
if (threadIdx.x < warp_size) {
T my_warp_sum = warp_smem[threadIdx.x];
__shared__ typename cub::WarpReduce<T>::TempStorage temp_storage;
block_sum = cub::WarpReduce<T>(temp_storage).Sum(my_warp_sum);
}
return block_sum;
}
// return the new_value for output column at index `idx`
template<class T, bool replacement_has_nulls>
__device__ auto get_new_value(gdf_size_type idx,
const T* __restrict__ input_data,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
bit_mask_t const * __restrict__ replacement_valid)
{
auto found_ptr = thrust::find(thrust::seq, values_to_replace_begin,
values_to_replace_end, input_data[idx]);
T new_value{0};
bool output_is_valid{true};
if (found_ptr != values_to_replace_end) {
auto d = thrust::distance(values_to_replace_begin, found_ptr);
new_value = d_replacement_values[d];
if (replacement_has_nulls) {
output_is_valid = bit_mask::is_valid(replacement_valid, d);
}
} else {
new_value = input_data[idx];
}
return thrust::make_pair(new_value, output_is_valid);
}
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel that replaces elements from `output_data` given the following
* rule: replace all `values_to_replace[i]` in [values_to_replace_begin`,
* `values_to_replace_end`) present in `output_data` with `d_replacement_values[i]`.
*
* @tparam input_has_nulls `true` if the input column has a valid mask, `false` otherwise
* @tparam replacement_has_nulls `true` if the replacement_values column has a valid mask, `false` otherwise
* The input_has_nulls and replacement_has_nulls template parameters allow us to specialize
* this kernel for the different scenarios for performance without writing different kernels.
*
* @param[in] input_data Device array with the data to be modified
* @param[in] input_valid Valid mask associated with input_data
* @param[out] output_data Device array to store the data from input_data
* @param[out] output_valid Valid mask associated with output_data
* @param[out] output_valid_count #valid in output column
* @param[in] nrows # rows in `output_data`
* @param[in] values_to_replace_begin Device pointer to the beginning of the sequence
* of old values to be replaced
* @param[in] values_to_replace_end Device pointer to the end of the sequence
* of old values to be replaced
* @param[in] d_replacement_values Device array with the new values
* @param[in] replacement_valid Valid mask associated with d_replacement_values
*
* @returns
*/
/* ----------------------------------------------------------------------------*/
template <class T,
bool input_has_nulls, bool replacement_has_nulls>
__global__
void replace_kernel(const T* __restrict__ input_data,
bit_mask_t const * __restrict__ input_valid,
T * __restrict__ output_data,
bit_mask_t * __restrict__ output_valid,
gdf_size_type * __restrict__ output_valid_count,
gdf_size_type nrows,
const T* __restrict__ values_to_replace_begin,
const T* __restrict__ values_to_replace_end,
const T* __restrict__ d_replacement_values,
bit_mask_t const * __restrict__ replacement_valid)
{
gdf_size_type i = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
__shared__ uint32_t valid_sum[warp_size];
// init shared memory for block valid counts
if (input_has_nulls or replacement_has_nulls){
if(threadIdx.x < warp_size) valid_sum[threadIdx.x] = 0;
__syncthreads();
}
while (i < nrows) {
bool output_is_valid = true;
uint32_t bitmask = 0xffffffff;
if (input_has_nulls) {
bool const input_is_valid{bit_mask::is_valid(input_valid, i)};
output_is_valid = input_is_valid;
bitmask = __ballot_sync(active_mask, input_is_valid);
if (input_is_valid) {
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i, input_data,
values_to_replace_begin,
values_to_replace_end,
d_replacement_values,
replacement_valid);
}
} else {
thrust::tie(output_data[i], output_is_valid) =
get_new_value<T, replacement_has_nulls>(i, input_data,
values_to_replace_begin,
values_to_replace_end,
d_replacement_values,
replacement_valid);
}
/* output valid counts calculations*/
if (input_has_nulls or replacement_has_nulls){
bitmask &= __ballot_sync(active_mask, output_is_valid);
if(0 == (threadIdx.x % warp_size)){
output_valid[(int)(i/warp_size)] = bitmask;
valid_sum[(int)(threadIdx.x / warp_size)] += __popc(bitmask);
}
}
i += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if(input_has_nulls or replacement_has_nulls){
__syncthreads(); // waiting for the valid counts of each warp to be ready
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = sum_warps<uint32_t>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x < warp_size && 0 == (threadIdx.x % warp_size)) {
atomicAdd(output_valid_count, block_valid_count);
}
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_kernel` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_kernel_forwarder {
template <typename col_type>
void operator()(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values,
gdf_column &output,
cudaStream_t stream = 0)
{
const bool input_has_nulls = cudf::has_nulls(input_col);
const bool replacement_has_nulls = cudf::has_nulls(replacement_values);
const bit_mask_t* __restrict__ typed_input_valid =
reinterpret_cast<bit_mask_t*>(input_col.valid);
const bit_mask_t* __restrict__ typed_replacement_valid =
reinterpret_cast<bit_mask_t*>(replacement_values.valid);
bit_mask_t* __restrict__ typed_out_valid =
reinterpret_cast<bit_mask_t*>(output.valid);
gdf_size_type *valid_count = nullptr;
if (typed_out_valid != nullptr) {
RMM_ALLOC(&valid_count, sizeof(gdf_size_type), stream);
CUDA_TRY(cudaMemsetAsync(valid_count, 0, sizeof(gdf_size_type), stream));
}
col_type const * values_to_replace_ptr{ cudf::get_data<col_type const>(values_to_replace) };
cudf::util::cuda::grid_config_1d grid{output.size, BLOCK_SIZE, 1};
auto replace = replace_kernel<col_type, true, true>;
if (input_has_nulls){
if (replacement_has_nulls){
replace = replace_kernel<col_type, true, true>;
}else{
replace = replace_kernel<col_type, true, false>;
}
}else{
if (replacement_has_nulls){
replace = replace_kernel<col_type, false, true>;
}else{
replace = replace_kernel<col_type, false, false>;
}
}
replace<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(
static_cast<const col_type*>(input_col.data),
typed_input_valid,
static_cast<col_type*>(output.data),
typed_out_valid,
valid_count,
output.size,
values_to_replace_ptr,
values_to_replace_ptr + replacement_values.size,
static_cast<const col_type*>(replacement_values.data),
typed_replacement_valid);
if(typed_out_valid != nullptr){
gdf_size_type valids {0};
CUDA_TRY(cudaMemcpyAsync(&valids, valid_count,
sizeof(gdf_size_type), cudaMemcpyDefault, stream));
output.null_count = output.size - valids;
RMM_FREE(valid_count, stream);
}
}
};
} //end anonymous namespace
namespace cudf{
namespace detail {
gdf_column find_and_replace_all(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values,
cudaStream_t stream = 0) {
if (0 == input_col.size )
{
return cudf::empty_like(input_col);
}
if (0 == values_to_replace.size || 0 == replacement_values.size)
{
return cudf::copy(input_col, stream);
}
CUDF_EXPECTS(values_to_replace.size == replacement_values.size,
"values_to_replace and replacement_values size mismatch.");
CUDF_EXPECTS(input_col.dtype == values_to_replace.dtype &&
input_col.dtype == replacement_values.dtype,
"Columns type mismatch.");
CUDF_EXPECTS(input_col.data != nullptr, "Null input data.");
CUDF_EXPECTS(values_to_replace.data != nullptr && replacement_values.data != nullptr,
"Null replace data.");
CUDF_EXPECTS(values_to_replace.valid == nullptr || values_to_replace.null_count == 0,
"Nulls are in values_to_replace column.");
gdf_column output = cudf::allocate_like(input_col, RETAIN, stream);
if (nullptr == input_col.valid && replacement_values.valid != nullptr) {
gdf_valid_type *valid = nullptr;
gdf_size_type bytes = gdf_valid_allocation_size(input_col.size);
RMM_ALLOC(&valid, bytes, stream);
CUDA_TRY(cudaMemsetAsync(valid, 0, bytes, stream));
CUDF_EXPECTS(GDF_SUCCESS == gdf_column_view(&output, output.data, valid,
input_col.size, input_col.dtype),
"cudf::replace failed to add valid mask to output col.");
}
cudf::type_dispatcher(input_col.dtype, replace_kernel_forwarder{},
input_col,
values_to_replace,
replacement_values,
output,
stream);
CHECK_STREAM(stream);
return output;
}
} //end details
/* --------------------------------------------------------------------------*/
/**
* @brief Replace elements from `input_col` according to the mapping `values_to_replace` to
* `replacement_values`, that is, replace all `values_to_replace[i]` present in `input_col`
* with `replacement_values[i]`.
*
* @param[in] col gdf_column with the data to be modified
* @param[in] values_to_replace gdf_column with the old values to be replaced
* @param[in] replacement_values gdf_column with the new values
*
* @returns output gdf_column with the modified data
*/
/* ----------------------------------------------------------------------------*/
gdf_column find_and_replace_all(const gdf_column &input_col,
const gdf_column &values_to_replace,
const gdf_column &replacement_values){
return detail::find_and_replace_all(input_col, values_to_replace, replacement_values);
}
} //end cudf
namespace{ //anonymous
using bit_mask::bit_mask_t;
template <typename Type>
__global__
void replace_nulls_with_scalar(gdf_size_type size,
const Type* __restrict__ in_data,
const bit_mask_t* __restrict__ in_valid,
const Type* __restrict__ replacement,
Type* __restrict__ out_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (int i=start; i<size; i+=step) {
out_data[i] = bit_mask::is_valid(in_valid, i)? in_data[i] : *replacement;
}
}
template <typename Type>
__global__
void replace_nulls_with_column(gdf_size_type size,
const Type* __restrict__ in_data,
const bit_mask_t* __restrict__ in_valid,
const Type* __restrict__ replacement,
Type* __restrict__ out_data)
{
int tid = threadIdx.x;
int blkid = blockIdx.x;
int blksz = blockDim.x;
int gridsz = gridDim.x;
int start = tid + blkid * blksz;
int step = blksz * gridsz;
for (int i=start; i<size; i+=step) {
out_data[i] = bit_mask::is_valid(in_valid, i)? in_data[i] : replacement[i];
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_nulls_column_kernel_forwarder {
template <typename col_type>
void operator()(gdf_size_type nrows,
void* d_in_data,
gdf_valid_type* d_in_valid,
const void* d_replacement,
void* d_out_data,
cudaStream_t stream = 0)
{
cudf::util::cuda::grid_config_1d grid{nrows, BLOCK_SIZE};
replace_nulls_with_column<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(nrows,
static_cast<const col_type*>(d_in_data),
reinterpret_cast<bit_mask_t*>(d_in_valid),
static_cast<const col_type*>(d_replacement),
static_cast<col_type*>(d_out_data)
);
}
};
/* --------------------------------------------------------------------------*/
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
/* ----------------------------------------------------------------------------*/
struct replace_nulls_scalar_kernel_forwarder {
template <typename col_type>
void operator()(gdf_size_type nrows,
void* d_in_data,
gdf_valid_type* d_in_valid,
const void* replacement,
void* d_out_data,
cudaStream_t stream = 0)
{
cudf::util::cuda::grid_config_1d grid{nrows, BLOCK_SIZE};
auto t_replacement = static_cast<const col_type*>(replacement);
col_type* d_replacement = nullptr;
RMM_TRY(RMM_ALLOC(&d_replacement, sizeof(col_type), stream));
CUDA_TRY(cudaMemcpyAsync(d_replacement, t_replacement, sizeof(col_type),
cudaMemcpyHostToDevice, stream));
replace_nulls_with_scalar<<<grid.num_blocks, BLOCK_SIZE, 0, stream>>>(nrows,
static_cast<const col_type*>(d_in_data),
reinterpret_cast<bit_mask_t*>(d_in_valid),
static_cast<const col_type*>(d_replacement),
static_cast<col_type*>(d_out_data)
);
RMM_TRY(RMM_FREE(d_replacement, stream));
}
};
} //end anonymous namespace
namespace cudf {
namespace detail {
gdf_column replace_nulls(const gdf_column& input,
const gdf_column& replacement,
cudaStream_t stream)
{
if (input.size == 0) {
return cudf::empty_like(input);
}
CUDF_EXPECTS(nullptr != input.data, "Null input data");
if (input.null_count == 0 || input.valid == nullptr) {
return cudf::copy(input);
}
CUDF_EXPECTS(input.dtype == replacement.dtype, "Data type mismatch");
CUDF_EXPECTS(replacement.size == 1 || replacement.size == input.size, "Column size mismatch");
CUDF_EXPECTS(nullptr != replacement.data, "Null replacement data");
CUDF_EXPECTS(nullptr == replacement.valid || 0 == replacement.null_count,
"Invalid replacement data");
gdf_column output = cudf::allocate_like(input, NEVER, stream);
cudf::type_dispatcher(input.dtype, replace_nulls_column_kernel_forwarder{},
input.size,
input.data,
input.valid,
replacement.data,
output.data,
stream);
return output;
}
gdf_column replace_nulls(const gdf_column& input,
const gdf_scalar& replacement,
cudaStream_t stream)
{
if (input.size == 0) {
return cudf::empty_like(input);
}
CUDF_EXPECTS(nullptr != input.data, "Null input data");
if (input.null_count == 0 || input.valid == nullptr) {
return cudf::copy(input);
}
CUDF_EXPECTS(input.dtype == replacement.dtype, "Data type mismatch");
CUDF_EXPECTS(true == replacement.is_valid, "Invalid replacement data");
gdf_column output = cudf::allocate_like(input, NEVER, stream);
cudf::type_dispatcher(input.dtype, replace_nulls_scalar_kernel_forwarder{},
input.size,
input.data,
input.valid,
&(replacement.data),
output.data);
return output;
}
} // namespace detail
gdf_column replace_nulls(const gdf_column& input,
const gdf_column& replacement)
{
return detail::replace_nulls(input, replacement, 0);
}
gdf_column replace_nulls(const gdf_column& input,
const gdf_scalar& replacement)
{
return detail::replace_nulls(input, replacement, 0);
}
} // namespace cudf
|
f0430eb3b44128e3c1ba605dc333228fd9f30239.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void ReLUForward(const int_tp n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
#endif // USE_ROCM
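/* Reference for the two CUDA kernels in this file (matching the expressions in their bodies):
 *   forward:  top[i] = bottom[i]                   if bottom[i] > 0
 *             top[i] = negative_slope * bottom[i]  otherwise
 *   backward: bottom_diff[i] = top_diff[i] * (bottom[i] > 0 ? 1 : negative_slope)
 * i.e. the (leaky) ReLU and its derivative; the GreenTea branch enqueues the corresponding
 * OpenCL kernels ("relu_forward" / "relu_backward"). */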
template<typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel(
CL_KERNEL_SELECT("relu_forward"));
viennacl::ocl::enqueue(
oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx), negative_slope),
ctx.get_queue());
ctx.get_queue().finish();
#endif // USE_GREENTEA
}
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
#ifdef USE_ROCM
template<typename Dtype>
__global__ void ReLUBackward(const int_tp n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index]
* ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);
}
}
#endif // USE_ROCM
template<typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int_tp count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel(
CL_KERNEL_SELECT("relu_backward"));
viennacl::ocl::enqueue(
oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx),
negative_slope),
ctx.get_queue());
ctx.get_queue().finish();
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
f0430eb3b44128e3c1ba605dc333228fd9f30239.cu
|
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void ReLUForward(const int_tp n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
#endif // USE_CUDA
template<typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel(
CL_KERNEL_SELECT("relu_forward"));
viennacl::ocl::enqueue(
oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx), negative_slope),
ctx.get_queue());
ctx.get_queue().finish();
#endif // USE_GREENTEA
}
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
#ifdef USE_CUDA
template<typename Dtype>
__global__ void ReLUBackward(const int_tp n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index]
* ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);
}
}
#endif // USE_CUDA
template<typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int_tp count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel(
CL_KERNEL_SELECT("relu_backward"));
viennacl::ocl::enqueue(
oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx),
negative_slope),
ctx.get_queue());
ctx.get_queue().finish();
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
0f22226e3f37c2c4077d47dc7bf70b41870e5092.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Based off work by Nelson, et al.
Brigham Young University (2010)
Adapted by Kevin Yuh (2015)
Modified by Jordan Bonilla and Matthew Cedeno (2016)
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hipfft.h>
#include <time.h>
#include "ta_utilities.hpp"
#define PI 3.14159265358979
/* Check errors on CUDA runtime functions */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
exit(code);
}
}
// declare texture reference
texture<float, 2, hipReadModeElementType> texreference;
/* Check errors on cuFFT functions */
void gpuFFTchk(int errval){
if (errval != HIPFFT_SUCCESS){
printf("Failed FFT call, error code %d\n", errval);
}
}
/* Check errors on CUDA kernel calls */
void checkCUDAKernelError()
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
} else {
fprintf(stderr, "No kernel error detected\n");
}
}
__global__
void cudaMultiplyKernel(hipfftComplex *raw_data,
unsigned int nAngles, unsigned int sinogram_width) {
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_index < nAngles * sinogram_width) {
unsigned int p = thread_index % sinogram_width;
if (p < sinogram_width / 2) {
raw_data[thread_index].x = raw_data[thread_index].x * ((2.0 * p) / sinogram_width);
raw_data[thread_index].y = raw_data[thread_index].y * ((2.0 * p) / sinogram_width);
} else {
raw_data[thread_index].x = raw_data[thread_index].x * ((2.0 * (sinogram_width - p)) / sinogram_width);
raw_data[thread_index].y = raw_data[thread_index].y * ((2.0 * (sinogram_width - p)) / sinogram_width);
}
// raw_data[thread_index].x = raw_data[thread_index].x;
// raw_data[thread_index].y = raw_data[thread_index].y;
thread_index += blockDim.x * gridDim.x;
}
}
__global__
void cudaTakeFloatKernel(const hipfftComplex *dev_out_filter,
float *dev_sinogram_float, const unsigned int nAngles, const unsigned int sinogram_width) {
unsigned int thread_index = blockDim.x * blockIdx.x + threadIdx.x;
while (thread_index < nAngles * sinogram_width) {
dev_sinogram_float[thread_index] = dev_out_filter[thread_index].x;
thread_index += blockDim.x * gridDim.x;
}
}
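// Backprojection: each thread covers one or more output pixels via a grid-stride loop.
// The flat pixel index is turned into image-centered coordinates (x0, y0); for every
// projection angle the pixel is projected onto the detector axis to get an offset d,
// and the filtered sinogram sample at column sinogram_width/2 +/- d of that angle's row
// is read through the 2D texture (hardware bilinear filtering) and accumulated.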
__global__
void cudaBackProjKernel(float *output_dev,
const unsigned int nAngles,
const unsigned int sinogram_width,
const unsigned int width,
const unsigned int height) {
unsigned int thread_index = blockDim.x * blockIdx.x + threadIdx.x;
while (thread_index < width * height) {
int y0 = height/2 - thread_index / width;
int x0 = thread_index % width - width /2;
for (int i = 0; i < nAngles; ++i) {
float sita = (float)i * PI / nAngles;
float d, xi, yi, q;
if (sita == 0) {
d = x0;
} else if (sita == PI / 2) {
d = y0;
} else {
float m = -cos(sita)/sin(sita);
q = -1/m;
xi = (y0 - m * x0)/(q - m);
yi = q * xi;
d = sqrtf(xi * xi + yi * yi);
}
if ((q > 0 && xi < 0)||(q < 0 && xi > 0)) {
output_dev[thread_index] += tex2D(texreference, sinogram_width/2-d, i); // ( , xindex, yindex)
} else {
output_dev[thread_index] += tex2D(texreference, sinogram_width/2+d, i);
// output_dev[index] += dev_sinogram_float[(int)(i * sinogram_width + d + sinogram_width / 2)];
}
}
thread_index += blockDim.x * gridDim.x;
}
}
void cudaCallMultiplyKernel (const unsigned int blocks,
const unsigned int threadsPerBlock,
hipfftComplex *raw_data,
const unsigned int nAngles,
const unsigned int sinogram_width) {
hipLaunchKernelGGL(( cudaMultiplyKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, raw_data, nAngles, sinogram_width);
}
void cudaCallTakeFloatKernel(const unsigned int nBlocks,
const unsigned int threadsPerBlock,
const hipfftComplex *dev_out_filter,
float *dev_sinogram_float,
const unsigned int nAngles,
const unsigned int sinogram_width) {
hipLaunchKernelGGL(( cudaTakeFloatKernel), dim3(nBlocks), dim3(threadsPerBlock), 0, 0, dev_out_filter, dev_sinogram_float, nAngles, sinogram_width);
}
void cudaCallBackProjKernel(const unsigned int nBlocks,
const unsigned int threadsPerBlock,
float *output_dev,
const unsigned int nAngles,
const unsigned int sinogram_width,
const unsigned int width,
const unsigned int height) {
hipLaunchKernelGGL(( cudaBackProjKernel), dim3(nBlocks), dim3(threadsPerBlock), 0, 0, output_dev, nAngles, sinogram_width, width, height);
}
int main(int argc, char** argv){
// These functions allow you to select the least utilized GPU
// on your system as well as enforce a time limit on program execution.
// Please leave these enabled as a courtesy to your fellow classmates
// if you are using a shared computer. You may ignore or remove these
// functions if you are running on your local machine.
TA_Utilities::select_least_utilized_GPU();
int max_time_allowed_in_seconds = 30;
TA_Utilities::enforce_time_limit(max_time_allowed_in_seconds);
// Begin timer and check for the correct number of inputs
time_t start = clock();
if (argc != 7){
fprintf(stderr, "Incorrect number of arguments.\n\n");
fprintf(stderr, "\nArguments: \n \
< Input sinogram text file's name > \n \
< Width or height of original image, whichever is larger > \n \
< Number of angles in sinogram >\n \
< threads per block >\n \
< number of blocks >\n \
< output text file's name >\n");
exit(EXIT_FAILURE);
}
/********** Parameters **********/
int width = atoi(argv[2]);
int height = width;
int sinogram_width = (int)ceilf( height * sqrt(2) );
int nAngles = atoi(argv[3]);
int threadsPerBlock = atoi(argv[4]);
int nBlocks = atoi(argv[5]);
/********** Data storage *********/
// GPU DATA STORAGE
hipfftComplex *dev_sinogram_cmplx;
float *dev_sinogram_float;
float* output_dev; // Image storage
hipfftComplex *sinogram_host;
size_t size_result = width*height*sizeof(float);
float *output_host = (float *)malloc(size_result);
/*********** Set up IO, Read in data ************/
sinogram_host = (hipfftComplex *)malloc( sinogram_width*nAngles*sizeof(hipfftComplex) );
FILE *dataFile = fopen(argv[1],"r");
if (dataFile == NULL){
fprintf(stderr, "Sinogram file missing\n");
exit(EXIT_FAILURE);
}
FILE *outputFile = fopen(argv[6], "w");
if (outputFile == NULL){
fprintf(stderr, "Output file cannot be written\n");
exit(EXIT_FAILURE);
}
int j, i;
for(i = 0; i < nAngles * sinogram_width; i++){
fscanf(dataFile,"%f",&sinogram_host[i].x);
sinogram_host[i].y = 0;
}
fclose(dataFile);
/*********** Assignment starts here *********/
/* TODO ok: Allocate memory for all GPU storage above, copy input sinogram
over to dev_sinogram_cmplx. */
// in texture memory:
gpuErrchk(hipMalloc((void**)&dev_sinogram_cmplx, nAngles * sinogram_width * sizeof(hipfftComplex)));
gpuErrchk(hipMemcpy(dev_sinogram_cmplx, sinogram_host, nAngles * sinogram_width * sizeof(hipfftComplex), hipMemcpyHostToDevice));
/* TODO 1 ok: Implement the high-pass filter:
- Use cuFFT for the forward FFT
- Create your own kernel for the frequency scaling.
- Use cuFFT for the inverse FFT
- extract real components to floats
- Free the original sinogram (dev_sinogram_cmplx)
Note: If you want to deal with real-to-complex and complex-to-real
transforms in cuFFT, you'll have to slightly change our code above.
*/
// // create the high pass filter vector
// hipfftComplex *filter_v = (hipfftComplex*)malloc(sizeof(hipfftComplex) * sinogram_width);
// for (int i = 0; i < sinogram_width; ++i) {
// filter_v[i].x = 1 - abs((float)(2 * i - sinogram_width) / sinogram_width);
// filter_v[i].y = 0;
// } // on freq domain
// DATA storage
// hipfftComplex *dev_filter_v;
// gpuErrchk(hipMalloc((void**)&dev_filter_v, sizeof(hipfftComplex) * sinogram_width));
// gpuErrchk(hipMemcpy(dev_filter_v, filter_v, sinogram_width * sizeof(hipfftComplex), hipMemcpyHostToDevice));
// hipfftComplex *dev_out_filter;
// gpuErrchk(hipMalloc((void**)&dev_out_filter, sizeof(hipfftComplex) * sinogram_width * nAngles));
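// Batched 1D FFT: nAngles independent complex-to-complex transforms of length
// sinogram_width, executed in place on dev_sinogram_cmplx. The inverse transform below
// is unnormalized (cuFFT/hipFFT do not divide by the transform length).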
hipfftHandle plan;
gpuFFTchk(hipfftPlan1d(&plan, sinogram_width, HIPFFT_C2C, nAngles));
gpuFFTchk(hipfftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, HIPFFT_FORWARD));
// call the kernel to perform the filter
cudaCallMultiplyKernel(nBlocks, threadsPerBlock, dev_sinogram_cmplx, nAngles, sinogram_width);
checkCUDAKernelError();
printf("finish filter\n");
// inverse fft
gpuFFTchk(hipfftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, HIPFFT_BACKWARD));
// destroy the cufft plan
gpuFFTchk(hipfftDestroy(plan));
// take the float
gpuErrchk(hipMalloc((void**)&dev_sinogram_float, nAngles * sinogram_width * sizeof(float)));
cudaCallTakeFloatKernel(nBlocks, threadsPerBlock, dev_sinogram_cmplx, dev_sinogram_float, nAngles, sinogram_width);
checkCUDAKernelError();
// free dev_sinogram_cmplx
gpuErrchk(hipFree(dev_sinogram_cmplx));
printf("finish fft\n");
// gpuErrchk(hipFree(dev_out_filter));
/* TODO 2: Implement backprojection.
- Allocate memory for the output image.
- Create your own kernel to accelerate backprojection.
- Copy the reconstructed image back to output_host.
- Free all remaining memory on the GPU.
*/
// first I think I will have to copy dev_sinogram_float from device to host;
// but actually it's not necessary
// float *host_sinogram_float = (float*)malloc(sizeof(float)*nAngles*sinogram_width);
// gpuErrchk(hipMemcpy(host_sinogram_float, dev_sinogram_float, sizeof(float)*nAngles*sinogram_width, hipMemcpyDeviceToHost));
// gpuErrchk(hipFree(dev_sinogram_float));
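// Stage the filtered sinogram in a CUDA array and bind it to the 2D texture reference so
// the backprojection kernel can sample it with tex2D using hardware linear filtering at
// non-integer detector coordinates.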
hipArray *cArray;
hipChannelFormatDesc channel;
channel = hipCreateChannelDesc<float>();
// allocate device memory for cuda array
gpuErrchk(hipMallocArray(&cArray, &channel, sinogram_width, nAngles));
int bytes = sizeof(float) * nAngles * sinogram_width;
gpuErrchk(hipMemcpyToArray(cArray, 0, 0, dev_sinogram_float, bytes, hipMemcpyDeviceToDevice));
// copy the float sinogram from global memory into the texture-backed CUDA array
gpuErrchk(hipFree(dev_sinogram_float));
// set texture filter mode
texreference.filterMode = hipFilterModeLinear;
// set texture address mode
texreference.addressMode[0] = hipAddressModeWrap; // necessary???
texreference.addressMode[1] = hipAddressModeClamp;
// bind texture reference with cuda array
gpuErrchk(hipBindTextureToArray(texreference, cArray));
// Allocate memory for the output image.
gpuErrchk(hipMalloc((void**)&output_dev, size_result));
gpuErrchk(hipMemset(output_dev, 0, size_result));
// call kernel
cudaCallBackProjKernel(nBlocks, threadsPerBlock, output_dev, nAngles, sinogram_width, width, height);
checkCUDAKernelError();
// unbind texture
hipUnbindTexture(texreference);
// copy result out
gpuErrchk(hipMemcpy(output_host, output_dev, size_result, hipMemcpyDeviceToHost));
gpuErrchk(hipFree(output_dev));
gpuErrchk(hipFreeArray(cArray));
// Allocate memory for the output image.
// gpuErrchk(hipMalloc((void**)&output_dev, size_result));
// gpuErrchk(hipMemset(output_dev, 0, size_result));
// // call back projection kernel
// cudaCallBackProjKernel(nBlocks, threadsPerBlock, dev_sinogram_float, output_dev, nAngles, sinogram_width, width, height);
// printf("finish back proj\n");
// checkCUDAKernelError();
// gpuErrchk(hipMemcpy(output_host, output_dev, size_result, hipMemcpyDeviceToHost));
// gpuErrchk(hipFree(dev_sinogram_float));
// gpuErrchk(hipFree(output_dev));
/* Export image data. */
for(j = 0; j < width; j++){
for(i = 0; i < height; i++){
fprintf(outputFile, "%e ",output_host[j*width + i]);
}
fprintf(outputFile, "\n");
}
/* Cleanup: Free host memory, close files. */
free(sinogram_host);
free(output_host);
fclose(outputFile);
printf("CT reconstruction complete. Total run time: %f seconds\n", (float) (clock() - start) / 1000.0);
return 0;
}
|
0f22226e3f37c2c4077d47dc7bf70b41870e5092.cu
|
/*
Based off work by Nelson, et al.
Brigham Young University (2010)
Adapted by Kevin Yuh (2015)
Modified by Jordan Bonilla and Matthew Cedeno (2016)
*/
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cufft.h>
#include <time.h>
#include "ta_utilities.hpp"
#define PI 3.14159265358979
/* Check errors on CUDA runtime functions */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
// declare texture reference
texture<float, 2, cudaReadModeElementType> texreference;
/* Check errors on cuFFT functions */
void gpuFFTchk(int errval){
if (errval != CUFFT_SUCCESS){
printf("Failed FFT call, error code %d\n", errval);
}
}
/* Check errors on CUDA kernel calls */
void checkCUDAKernelError()
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
} else {
fprintf(stderr, "No kernel error detected\n");
}
}
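// Frequency-domain ramp filter: bin p of a length-W row is scaled by 2 * min(p, W - p) / W
// (0 at DC, ~1 near Nyquist), processed with a grid-stride loop.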
__global__
void cudaMultiplyKernel(cufftComplex *raw_data,
unsigned int nAngles, unsigned int sinogram_width) {
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
while (thread_index < nAngles * sinogram_width) {
unsigned int p = thread_index % sinogram_width;
if (p < sinogram_width / 2) {
raw_data[thread_index].x = raw_data[thread_index].x * ((2.0 * p) / sinogram_width);
raw_data[thread_index].y = raw_data[thread_index].y * ((2.0 * p) / sinogram_width);
} else {
raw_data[thread_index].x = raw_data[thread_index].x * ((2.0 * (sinogram_width - p)) / sinogram_width);
raw_data[thread_index].y = raw_data[thread_index].y * ((2.0 * (sinogram_width - p)) / sinogram_width);
}
// raw_data[thread_index].x = raw_data[thread_index].x;
// raw_data[thread_index].y = raw_data[thread_index].y;
thread_index += blockDim.x * gridDim.x;
}
}
__global__
void cudaTakeFloatKernel(const cufftComplex *dev_out_filter,
float *dev_sinogram_float, const unsigned int nAngles, const unsigned int sinogram_width) {
unsigned int thread_index = blockDim.x * blockIdx.x + threadIdx.x;
while (thread_index < nAngles * sinogram_width) {
dev_sinogram_float[thread_index] = dev_out_filter[thread_index].x;
thread_index += blockDim.x * gridDim.x;
}
}
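// Backprojection: for each image-centered pixel (x0, y0) and each projection angle, the
// pixel's detector offset d is computed and the filtered sinogram is sampled at
// sinogram_width/2 +/- d through the bilinear texture and accumulated.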
__global__
void cudaBackProjKernel(float *output_dev,
const unsigned int nAngles,
const unsigned int sinogram_width,
const unsigned int width,
const unsigned int height) {
unsigned int thread_index = blockDim.x * blockIdx.x + threadIdx.x;
while (thread_index < width * height) {
int y0 = height/2 - thread_index / width;
int x0 = thread_index % width - width /2;
for (int i = 0; i < nAngles; ++i) {
float sita = (float)i * PI / nAngles;
float d, xi, yi, q;
if (sita == 0) {
d = x0;
} else if (sita == PI / 2) {
d = y0;
} else {
float m = -cos(sita)/sin(sita);
q = -1/m;
xi = (y0 - m * x0)/(q - m);
yi = q * xi;
d = sqrtf(xi * xi + yi * yi);
}
if ((q > 0 && xi < 0)||(q < 0 && xi > 0)) {
output_dev[thread_index] += tex2D(texreference, sinogram_width/2-d, i); // ( , xindex, yindex)
} else {
output_dev[thread_index] += tex2D(texreference, sinogram_width/2+d, i);
// output_dev[index] += dev_sinogram_float[(int)(i * sinogram_width + d + sinogram_width / 2)];
}
}
thread_index += blockDim.x * gridDim.x;
}
}
void cudaCallMultiplyKernel (const unsigned int blocks,
const unsigned int threadsPerBlock,
cufftComplex *raw_data,
const unsigned int nAngles,
const unsigned int sinogram_width) {
cudaMultiplyKernel<<<blocks, threadsPerBlock>>>(raw_data, nAngles, sinogram_width);
}
void cudaCallTakeFloatKernel(const unsigned int nBlocks,
const unsigned int threadsPerBlock,
const cufftComplex *dev_out_filter,
float *dev_sinogram_float,
const unsigned int nAngles,
const unsigned int sinogram_width) {
cudaTakeFloatKernel<<<nBlocks, threadsPerBlock>>>(dev_out_filter, dev_sinogram_float, nAngles, sinogram_width);
}
void cudaCallBackProjKernel(const unsigned int nBlocks,
const unsigned int threadsPerBlock,
float *output_dev,
const unsigned int nAngles,
const unsigned int sinogram_width,
const unsigned int width,
const unsigned int height) {
cudaBackProjKernel<<<nBlocks, threadsPerBlock>>>(output_dev, nAngles, sinogram_width, width, height);
}
int main(int argc, char** argv){
// These functions allow you to select the least utilized GPU
// on your system as well as enforce a time limit on program execution.
// Please leave these enabled as a courtesy to your fellow classmates
// if you are using a shared computer. You may ignore or remove these
// functions if you are running on your local machine.
TA_Utilities::select_least_utilized_GPU();
int max_time_allowed_in_seconds = 30;
TA_Utilities::enforce_time_limit(max_time_allowed_in_seconds);
// Begin timer and check for the correct number of inputs
time_t start = clock();
if (argc != 7){
fprintf(stderr, "Incorrect number of arguments.\n\n");
fprintf(stderr, "\nArguments: \n \
< Input sinogram text file's name > \n \
< Width or height of original image, whichever is larger > \n \
< Number of angles in sinogram >\n \
< threads per block >\n \
< number of blocks >\n \
< output text file's name >\n");
exit(EXIT_FAILURE);
}
/********** Parameters **********/
int width = atoi(argv[2]);
int height = width;
int sinogram_width = (int)ceilf( height * sqrt(2) );
int nAngles = atoi(argv[3]);
int threadsPerBlock = atoi(argv[4]);
int nBlocks = atoi(argv[5]);
/********** Data storage *********/
// GPU DATA STORAGE
cufftComplex *dev_sinogram_cmplx;
float *dev_sinogram_float;
float* output_dev; // Image storage
cufftComplex *sinogram_host;
size_t size_result = width*height*sizeof(float);
float *output_host = (float *)malloc(size_result);
/*********** Set up IO, Read in data ************/
sinogram_host = (cufftComplex *)malloc( sinogram_width*nAngles*sizeof(cufftComplex) );
FILE *dataFile = fopen(argv[1],"r");
if (dataFile == NULL){
fprintf(stderr, "Sinogram file missing\n");
exit(EXIT_FAILURE);
}
FILE *outputFile = fopen(argv[6], "w");
if (outputFile == NULL){
fprintf(stderr, "Output file cannot be written\n");
exit(EXIT_FAILURE);
}
int j, i;
for(i = 0; i < nAngles * sinogram_width; i++){
fscanf(dataFile,"%f",&sinogram_host[i].x);
sinogram_host[i].y = 0;
}
fclose(dataFile);
/*********** Assignment starts here *********/
/* TODO ok: Allocate memory for all GPU storage above, copy input sinogram
over to dev_sinogram_cmplx. */
// in texture memory:
gpuErrchk(cudaMalloc((void**)&dev_sinogram_cmplx, nAngles * sinogram_width * sizeof(cufftComplex)));
gpuErrchk(cudaMemcpy(dev_sinogram_cmplx, sinogram_host, nAngles * sinogram_width * sizeof(cufftComplex), cudaMemcpyHostToDevice));
/* TODO 1 ok: Implement the high-pass filter:
- Use cuFFT for the forward FFT
- Create your own kernel for the frequency scaling.
- Use cuFFT for the inverse FFT
- extract real components to floats
- Free the original sinogram (dev_sinogram_cmplx)
Note: If you want to deal with real-to-complex and complex-to-real
transforms in cuFFT, you'll have to slightly change our code above.
*/
// // create the high pass filter vector
// cufftComplex *filter_v = (cufftComplex*)malloc(sizeof(cufftComplex) * sinogram_width);
// for (int i = 0; i < sinogram_width; ++i) {
// filter_v[i].x = 1 - abs((float)(2 * i - sinogram_width) / sinogram_width);
// filter_v[i].y = 0;
// } // on freq domain
// DATA storage
// cufftComplex *dev_filter_v;
// gpuErrchk(cudaMalloc((void**)&dev_filter_v, sizeof(cufftComplex) * sinogram_width));
// gpuErrchk(cudaMemcpy(dev_filter_v, filter_v, sinogram_width * sizeof(cufftComplex), cudaMemcpyHostToDevice));
// cufftComplex *dev_out_filter;
// gpuErrchk(cudaMalloc((void**)&dev_out_filter, sizeof(cufftComplex) * sinogram_width * nAngles));
cufftHandle plan;
gpuFFTchk(cufftPlan1d(&plan, sinogram_width, CUFFT_C2C, nAngles));
gpuFFTchk(cufftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, CUFFT_FORWARD));
// call the kernel to perform the filter
cudaCallMultiplyKernel(nBlocks, threadsPerBlock, dev_sinogram_cmplx, nAngles, sinogram_width);
checkCUDAKernelError();
printf("finish filter\n");
// inverse fft
gpuFFTchk(cufftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, CUFFT_INVERSE));
// destroy the cufft plan
gpuFFTchk(cufftDestroy(plan));
// take the float
gpuErrchk(cudaMalloc((void**)&dev_sinogram_float, nAngles * sinogram_width * sizeof(float)));
cudaCallTakeFloatKernel(nBlocks, threadsPerBlock, dev_sinogram_cmplx, dev_sinogram_float, nAngles, sinogram_width);
checkCUDAKernelError();
// free dev_sinogram_cmplx
gpuErrchk(cudaFree(dev_sinogram_cmplx));
printf("finish fft\n");
// gpuErrchk(cudaFree(dev_out_filter));
/* TODO 2: Implement backprojection.
- Allocate memory for the output image.
- Create your own kernel to accelerate backprojection.
- Copy the reconstructed image back to output_host.
- Free all remaining memory on the GPU.
*/
// first I think I will have to copy dev_sinogram_float from device to host;
// but actually it's not necessary
// float *host_sinogram_float = (float*)malloc(sizeof(float)*nAngles*sinogram_width);
// gpuErrchk(cudaMemcpy(host_sinogram_float, dev_sinogram_float, sizeof(float)*nAngles*sinogram_width, cudaMemcpyDeviceToHost));
// gpuErrchk(cudaFree(dev_sinogram_float));
cudaArray *cArray;
cudaChannelFormatDesc channel;
channel = cudaCreateChannelDesc<float>();
// allocate device memory for cuda array
gpuErrchk(cudaMallocArray(&cArray, &channel, sinogram_width, nAngles));
int bytes = sizeof(float) * nAngles * sinogram_width;
gpuErrchk(cudaMemcpyToArray(cArray, 0, 0, dev_sinogram_float, bytes, cudaMemcpyDeviceToDevice));
// copy the float sinogram from global memory into the texture-backed CUDA array
gpuErrchk(cudaFree(dev_sinogram_float));
// set texture filter mode
texreference.filterMode = cudaFilterModeLinear;
// set texture address mode
texreference.addressMode[0] = cudaAddressModeWrap; // necessary???
texreference.addressMode[1] = cudaAddressModeClamp;
// bind texture reference with cuda array
gpuErrchk(cudaBindTextureToArray(texreference, cArray));
// Allocate memory for the output image.
gpuErrchk(cudaMalloc((void**)&output_dev, size_result));
gpuErrchk(cudaMemset(output_dev, 0, size_result));
// call kernel
cudaCallBackProjKernel(nBlocks, threadsPerBlock, output_dev, nAngles, sinogram_width, width, height);
checkCUDAKernelError();
// unbind texture
cudaUnbindTexture(texreference);
// copy result out
gpuErrchk(cudaMemcpy(output_host, output_dev, size_result, cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(output_dev));
gpuErrchk(cudaFreeArray(cArray));
// Allocate memory for the output image.
// gpuErrchk(cudaMalloc((void**)&output_dev, size_result));
// gpuErrchk(cudaMemset(output_dev, 0, size_result));
// // call back projection kernel
// cudaCallBackProjKernel(nBlocks, threadsPerBlock, dev_sinogram_float, output_dev, nAngles, sinogram_width, width, height);
// printf("finish back proj\n");
// checkCUDAKernelError();
// gpuErrchk(cudaMemcpy(output_host, output_dev, size_result, cudaMemcpyDeviceToHost));
// gpuErrchk(cudaFree(dev_sinogram_float));
// gpuErrchk(cudaFree(output_dev));
/* Export image data. */
for(j = 0; j < width; j++){
for(i = 0; i < height; i++){
fprintf(outputFile, "%e ",output_host[j*width + i]);
}
fprintf(outputFile, "\n");
}
/* Cleanup: Free host memory, close files. */
free(sinogram_host);
free(output_host);
fclose(outputFile);
printf("CT reconstruction complete. Total run time: %f seconds\n", (float) (clock() - start) / 1000.0);
return 0;
}
|
b84a64c512eaa5d22945e793670b23da91ebda11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <cuda_gl_interop.h>
#include <fstream>
#include <regex>
#include <string>
#include <algorithm>
#include <random>
#include <chrono>
#include <cudaDefs.h>
#include <FreeImage.h>
#include <imageManager.h>
using namespace std;
//TODO -> update TPB_1D and TPB_2D values
constexpr unsigned int TPB_1D = 8; // ThreadsPerBlock in one dimension
constexpr unsigned int TPB_2D = TPB_1D * TPB_1D; // ThreadsPerBlock = TPB_1D*TPB_1D (2D block)
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
std::mt19937 generator(std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count());
struct GLData {
unsigned int imageWidth;
unsigned int imageHeight;
unsigned int imageBPP;
unsigned int imagePitch;
unsigned int pboID;
unsigned int textureID;
unsigned int viewportWidth = 1024;
unsigned int viewportHeight = 1024;
};
struct CudaData {
hipTextureDesc texDesc; // Texture descriptor used to describe texture parameters
hipArray_t texArrayData; // Source texture data
hipResourceDesc resDesc; // A resource descriptor for obtaining the texture data
hipChannelFormatDesc texChannelDesc; // Texture channel descriptor to define channel bytes
hipTextureObject_t texObj; // CUDA texture object to be produced
cudaGraphicsResource_t texResource;
cudaGraphicsResource_t pboResource;
CudaData() {
memset(this, 0, sizeof(CudaData)); // DO NOT DELETE THIS !!!
}
};
struct Settings {
int leaders;
int followers;
string heightMap;
int heightmapGridX;
int heightmapGridY;
float leaderRadius;
float speedFactor;
string outputFile;
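// Minimal JSON-ish parsing: after whitespace is stripped from the config file, the regex
// below captures the text following "<atribName>": up to the next ',' or '}', then strips
// surrounding quotes if the value is a string.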
string getAtribVal(string &str, const string& atribName) {
smatch m;
regex rt(atribName + "\":([\\S\\s]+?(?=,|}))");
regex_search(str, m, rt);
string val = m.str().substr(atribName.size()+2);
if (val.at(0) == '\"') {
val = val.substr(1, val.size()-2);
}
return val;
}
Settings() = default;
Settings(const string& path) {
fstream inStream(path);
std::string str((std::istreambuf_iterator<char>(inStream)),std::istreambuf_iterator<char>());
str.erase(remove_if(str.begin(), str.end(), ::isspace), str.end());
this->leaders = std::stoi(getAtribVal(str, "leaders"));
this->followers = std::stoi(getAtribVal(str, "followers"));
this->heightmapGridX = std::stoi(getAtribVal(str, "heightmapGridX"));
this->heightmapGridY = std::stoi(getAtribVal(str, "heightmapGridY"));
this->leaderRadius = std::stof(getAtribVal(str, "leaderRadius"));
this->speedFactor = std::stof(getAtribVal(str, "speedFactor"));
this->heightMap = getAtribVal(str, "heightmap");
this->outputFile = getAtribVal(str, "outputFile");
}
};
Settings settings;
struct HeightMap {
GLData glData;
CudaData cudaData;
void prepareGlObjects(const char* imageFileName) { // allocate GL resources on the GPU
FIBITMAP* tmp = ImageManager::GenericLoader(imageFileName, 0);
glData.imageWidth = FreeImage_GetWidth(tmp);
glData.imageHeight = FreeImage_GetHeight(tmp);
glData.imageBPP = FreeImage_GetBPP(tmp);
glData.imagePitch = FreeImage_GetPitch(tmp);
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &glData.textureID);
glBindTexture(GL_TEXTURE_2D, glData.textureID);
//WARNING: Only some internal formats are supported by CUDA!!!
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, glData.imageWidth, glData.imageHeight, 0, GL_RED, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glBindTexture(GL_TEXTURE_2D, 0);
FreeImage_Unload(tmp);
glGenBuffers(1, &glData.pboID);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, glData.pboID); // Make this the current UNPACK buffer (OpenGL is state-based)
glBufferData(GL_PIXEL_UNPACK_BUFFER, glData.imageWidth * glData.imageHeight * 4, NULL, GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
void initCUDAObjects() {
// Register Image to cuda tex resource
checkCudaErrors(hipGraphicsGLRegisterImage(
&cudaData.texResource,
glData.textureID,
GL_TEXTURE_2D,
hipGraphicsRegisterFlags::hipGraphicsRegisterFlagsReadOnly
));
// Map resource and retrieve pointer to the underlying array data
checkCudaErrors(hipGraphicsMapResources(1, &cudaData.texResource, 0)); // tell OpenGL not to touch this texture for now
checkCudaErrors(hipGraphicsSubResourceGetMappedArray(&cudaData.texArrayData, cudaData.texResource, 0, 0)); // we want to pull pixel data out of the resource
// Set resource descriptor
cudaData.resDesc.resType = hipResourceType::hipResourceTypeArray;
cudaData.resDesc.res.array.array = cudaData.texArrayData;
// Set Texture Descriptor: Tex Units will know how to read the texture
cudaData.texDesc.readMode = hipReadModeElementType;
cudaData.texDesc.normalizedCoords = false;
cudaData.texDesc.filterMode = hipFilterModePoint;
cudaData.texDesc.addressMode[0] = hipAddressModeClamp;
cudaData.texDesc.addressMode[1] = hipAddressModeClamp;
// Set Channel Descriptor: How to interpret individual bytes
checkCudaErrors(hipGetChannelDesc(&cudaData.texChannelDesc, cudaData.texArrayData));
// Create CUDA Texture Object
checkCudaErrors(hipCreateTextureObject(&cudaData.texObj, &cudaData.resDesc, &cudaData.texDesc, nullptr));
// Unmap resource: Release the resource for OpenGL
checkCudaErrors(hipGraphicsUnmapResources(1, &cudaData.texResource, 0));
// Register PBO
checkCudaErrors(hipGraphicsGLRegisterBuffer(
&cudaData.pboResource,
glData.pboID,
hipGraphicsRegisterFlags::hipGraphicsRegisterFlagsWriteDiscard
));
}
void init(const string& path) {
prepareGlObjects(path.c_str());
initCUDAObjects();
}
~HeightMap() {
checkCudaErrors(hipGraphicsUnregisterResource(this->cudaData.texResource));
checkCudaErrors(hipGraphicsUnregisterResource(this->cudaData.pboResource));
if (this->glData.textureID > 0)
glDeleteTextures(1, &this->glData.textureID);
if (this->glData.pboID > 0)
glDeleteBuffers(1, &this->glData.pboID);
}
};
HeightMap hMap;
unsigned int overlayTexId;
struct Particle {
float x, y; // POSITION
float v_x { 0.0f }; // VELOCITY IN DIRECTION X
float v_y { 0.0f }; // VELOCITY IN DIRECTION Y
};
Particle* dLeaders;
Particle* dFollowers;
std::vector<Particle> generateParticles(int n) {
std::vector<Particle> result;
std::uniform_real_distribution<float> dis(0.0, 1.0);
for (int i = 0; i < n; i++) {
result.push_back(Particle{
dis(generator) * hMap.glData.imageWidth,
dis(generator) * hMap.glData.imageHeight,
0.0f, 0.0f
});
}
return result;
}
#pragma region --- CUDA ---
__global__ void clearPBO(unsigned char* pbo, const unsigned int pboWidth, const unsigned int pboHeight) {
unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= pboWidth || ty >= pboHeight) return;
unsigned int pboIdx = ((ty * pboWidth) + tx) * 4;
pbo[pboIdx++] = 0;
pbo[pboIdx++] = 0;
pbo[pboIdx++] = 0;
pbo[pboIdx] = 0;
}
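// Splat particles into the RGBA PBO: a grid-stride loop over the particle array writes one
// 4-byte pixel (color, alpha = 255) at each particle's floored (x, y) position.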
__global__ void renderParticles(uchar3 color, Particle* particles, int particleCount, unsigned char* pbo, const unsigned int pboWidth, const unsigned int pboHeight) {
unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int jump = blockDim.x * gridDim.x;
while (tx < particleCount) {
Particle p = particles[tx];
unsigned int pboIdx = ((floor(p.y) * pboWidth) + floor(p.x)) * 4;
pbo[pboIdx++] = color.x;
pbo[pboIdx++] = color.y;
pbo[pboIdx++] = color.z;
pbo[pboIdx] = 255;
tx += jump;
}
}
void cudaWorker() {
// Map GL resources
checkCudaErrors(hipGraphicsMapResources(1, &hMap.cudaData.texResource, 0));
checkCudaErrors(hipGraphicsSubResourceGetMappedArray(&hMap.cudaData.texArrayData, hMap.cudaData.texResource, 0, 0));
// TODO -> move pbo resource to be part of overlay texture
checkCudaErrors(hipGraphicsMapResources(1, &hMap.cudaData.pboResource, 0));
unsigned char* pboData;
size_t pboSize;
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, hMap.cudaData.pboResource));
{ // CLEAR PBO
dim3 block(TPB_1D, TPB_1D, 1);
dim3 grid((hMap.glData.imageWidth + TPB_1D - 1) / TPB_1D, (hMap.glData.imageHeight + TPB_1D - 1) / TPB_1D, 1);
hipLaunchKernelGGL(( clearPBO), dim3(grid), dim3(block), 0, 0, pboData, hMap.glData.imageWidth, hMap.glData.imageHeight);
};
{ // PUT PARTICLES INTO PBO
constexpr uchar3 leaderColor = {255, 0, 0};
constexpr uchar3 followerColor = {0, 0, 255};
//TODO -> adjust block and grid sizes
dim3 block(128, 1, 1);
dim3 grid(1, 1, 1);
hipLaunchKernelGGL(( renderParticles), dim3(grid), dim3(block), 0, 0, leaderColor, dLeaders, settings.leaders, pboData, hMap.glData.imageWidth, hMap.glData.imageHeight);
hipLaunchKernelGGL(( renderParticles), dim3(grid), dim3(block), 0, 0, followerColor, dFollowers, settings.followers, pboData, hMap.glData.imageWidth, hMap.glData.imageHeight);
};
// TODO -> Run kernel
// Unmap GL Resources
checkCudaErrors(hipGraphicsUnmapResources(1, &hMap.cudaData.texResource, 0));
checkCudaErrors(hipGraphicsUnmapResources(1, &hMap.cudaData.pboResource, 0));
// This updates GL texture from PBO
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, hMap.glData.pboID);
glBindTexture(GL_TEXTURE_2D, overlayTexId);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, hMap.glData.imageWidth, hMap.glData.imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
#pragma endregion
#pragma region --- OPEN_GL ---
void my_display() {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
//glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, hMap.glData.textureID);
glBegin(GL_QUADS);
glTexCoord2d(0, 0); glVertex2d(0, 0);
glTexCoord2d(1, 0); glVertex2d( hMap.glData.viewportWidth, 0);
glTexCoord2d(1, 1); glVertex2d( hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glTexCoord2d(0, 1); glVertex2d(0, hMap.glData.viewportHeight);
glEnd();
//glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, overlayTexId);
glBegin(GL_QUADS);
glTexCoord2d(0, 0); glVertex2d(0, 0);
glTexCoord2d(1, 0); glVertex2d( hMap.glData.viewportWidth, 0);
glTexCoord2d(1, 1); glVertex2d( hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glTexCoord2d(0, 1); glVertex2d(0, hMap.glData.viewportHeight);
glEnd();
glDisable(GL_TEXTURE_2D);
glFlush();
glutSwapBuffers();
}
void my_resize(GLsizei w, GLsizei h) {
hMap.glData.viewportWidth = w;
hMap.glData.viewportHeight = h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, hMap.glData.viewportWidth, 0, hMap.glData.viewportHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
void my_idle() {
cudaWorker();
glutPostRedisplay();
}
void initGL(int argc, char** argv) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE);
glutInitWindowSize(hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glutInitWindowPosition(0, 0);
glutSetOption(GLUT_RENDERING_CONTEXT, false ? GLUT_USE_CURRENT_CONTEXT : GLUT_CREATE_NEW_CONTEXT);
glutCreateWindow(0);
char m_windowsTitle[512];
snprintf(m_windowsTitle, 512, "SimpleView | context %s | renderer %s | vendor %s ",
(const char*)glGetString(GL_VERSION),
(const char*)glGetString(GL_RENDERER),
(const char*)glGetString(GL_VENDOR));
glutSetWindowTitle(m_windowsTitle);
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
glutIdleFunc(my_idle);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
// initialize necessary OpenGL extensions
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glShadeModel(GL_SMOOTH);
glViewport(0, 0, hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glFlush();
}
#pragma endregion
void initOverlayTex() {
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &overlayTexId);
glBindTexture(GL_TEXTURE_2D, overlayTexId);
std::vector<GLubyte> emptyData(hMap.glData.imageWidth * hMap.glData.imageHeight * 4, 128);
//WARNING: Only some internal formats are supported by CUDA!!!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, hMap.glData.imageWidth, hMap.glData.imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, &emptyData[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glBindTexture(GL_TEXTURE_2D, 0);
}
int main(int argc, char* argv[]) {
#pragma region initialize
initializeCUDA(deviceProp);
if (argc < 2) {
printf("Please specify path to the configuration path");
return 1;
}
settings = Settings(argv[1]);
initGL(1, argv);
// INITIALIZE HEIGHT MAP
hMap.init(settings.heightMap);
auto a = glGetError();
initOverlayTex();
a = glGetError();
// CREATE LEADERS AND COPY TO DEVICE
auto leaders = generateParticles(settings.leaders);
hipMalloc((void**)&dLeaders, settings.leaders * sizeof(Particle));
hipMemcpy(dLeaders, leaders.data(), settings.leaders * sizeof(Particle), hipMemcpyHostToDevice);
// CREATE FOLLOWERS AND COPY TO DEVICE
auto followers = generateParticles(settings.followers);
hipMalloc((void**)&dFollowers, settings.followers * sizeof(Particle));
hipMemcpy(dFollowers, followers.data(), settings.followers * sizeof(Particle), hipMemcpyHostToDevice);
#pragma endregion
glutMainLoop();
#pragma region clean_up
if (dLeaders) hipFree(dLeaders);
if (dFollowers) hipFree(dFollowers);
#pragma endregion
return 0;
}
|
b84a64c512eaa5d22945e793670b23da91ebda11.cu
|
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <cuda_gl_interop.h>
#include <fstream>
#include <regex>
#include <string>
#include <algorithm>
#include <random>
#include <chrono>
#include <cudaDefs.h>
#include <FreeImage.h>
#include <imageManager.h>
using namespace std;
//TODO -> update TPB_1D and TPB_2D values
constexpr unsigned int TPB_1D = 8; // ThreadsPerBlock in one dimension
constexpr unsigned int TPB_2D = TPB_1D * TPB_1D; // ThreadsPerBlock = TPB_1D*TPB_1D (2D block)
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
std::mt19937 generator(std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count());
struct GLData {
unsigned int imageWidth;
unsigned int imageHeight;
unsigned int imageBPP;
unsigned int imagePitch;
unsigned int pboID;
unsigned int textureID;
unsigned int viewportWidth = 1024;
unsigned int viewportHeight = 1024;
};
struct CudaData {
cudaTextureDesc texDesc; // Texture descriptor used to describe texture parameters
cudaArray_t texArrayData; // Source texture data
cudaResourceDesc resDesc; // A resource descriptor for obtaining the texture data
cudaChannelFormatDesc texChannelDesc; // Texture channel descriptor to define channel bytes
cudaTextureObject_t texObj; // CUDA texture object to be produced
cudaGraphicsResource_t texResource;
cudaGraphicsResource_t pboResource;
CudaData() {
memset(this, 0, sizeof(CudaData)); // DO NOT DELETE THIS !!!
}
};
struct Settings {
int leaders;
int followers;
string heightMap;
int heightmapGridX;
int heightmapGridY;
float leaderRadius;
float speedFactor;
string outputFile;
string getAtribVal(string &str, const string& atribName) {
smatch m;
regex rt(atribName + "\":([\\S\\s]+?(?=,|}))");
regex_search(str, m, rt);
string val = m.str().substr(atribName.size()+2);
if (val.at(0) == '\"') {
val = val.substr(1, val.size()-2);
}
return val;
}
Settings() = default;
Settings(const string& path) {
fstream inStream(path);
std::string str((std::istreambuf_iterator<char>(inStream)),std::istreambuf_iterator<char>());
str.erase(remove_if(str.begin(), str.end(), ::isspace), str.end());
this->leaders = std::stoi(getAtribVal(str, "leaders"));
this->followers = std::stoi(getAtribVal(str, "followers"));
this->heightmapGridX = std::stoi(getAtribVal(str, "heightmapGridX"));
this->heightmapGridY = std::stoi(getAtribVal(str, "heightmapGridY"));
this->leaderRadius = std::stof(getAtribVal(str, "leaderRadius"));
this->speedFactor = std::stof(getAtribVal(str, "speedFactor"));
this->heightMap = getAtribVal(str, "heightmap");
this->outputFile = getAtribVal(str, "outputFile");
}
};
Settings settings;
struct HeightMap {
GLData glData;
CudaData cudaData;
void prepareGlObjects(const char* imageFileName) { // allocate GL resources on the GPU
FIBITMAP* tmp = ImageManager::GenericLoader(imageFileName, 0);
glData.imageWidth = FreeImage_GetWidth(tmp);
glData.imageHeight = FreeImage_GetHeight(tmp);
glData.imageBPP = FreeImage_GetBPP(tmp);
glData.imagePitch = FreeImage_GetPitch(tmp);
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &glData.textureID);
glBindTexture(GL_TEXTURE_2D, glData.textureID);
//WARNING: Only some internal formats are supported by CUDA!!!
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, glData.imageWidth, glData.imageHeight, 0, GL_RED, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glBindTexture(GL_TEXTURE_2D, 0);
FreeImage_Unload(tmp);
glGenBuffers(1, &glData.pboID);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, glData.pboID); // Make this the current UNPACK buffer (OpenGL is state-based)
glBufferData(GL_PIXEL_UNPACK_BUFFER, glData.imageWidth * glData.imageHeight * 4, NULL, GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
void initCUDAObjects() {
// Register Image to cuda tex resource
checkCudaErrors(cudaGraphicsGLRegisterImage(
&cudaData.texResource,
glData.textureID,
GL_TEXTURE_2D,
cudaGraphicsRegisterFlags::cudaGraphicsRegisterFlagsReadOnly
));
// Map resource and retrieve pointer to the underlying array data
checkCudaErrors(cudaGraphicsMapResources(1, &cudaData.texResource, 0)); // tell OpenGL not to touch this texture for now
checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&cudaData.texArrayData, cudaData.texResource, 0, 0)); // we want to pull pixel data out of the resource
// Set resource descriptor
cudaData.resDesc.resType = cudaResourceType::cudaResourceTypeArray;
cudaData.resDesc.res.array.array = cudaData.texArrayData;
// Set Texture Descriptor: Tex Units will know how to read the texture
cudaData.texDesc.readMode = cudaReadModeElementType;
cudaData.texDesc.normalizedCoords = false;
cudaData.texDesc.filterMode = cudaFilterModePoint;
cudaData.texDesc.addressMode[0] = cudaAddressModeClamp;
cudaData.texDesc.addressMode[1] = cudaAddressModeClamp;
// Set Channel Descriptor: How to interpret individual bytes
checkCudaErrors(cudaGetChannelDesc(&cudaData.texChannelDesc, cudaData.texArrayData));
// Create CUDA Texture Object
checkCudaErrors(cudaCreateTextureObject(&cudaData.texObj, &cudaData.resDesc, &cudaData.texDesc, nullptr));
// Unmap resource: Release the resource for OpenGL
checkCudaErrors(cudaGraphicsUnmapResources(1, &cudaData.texResource, 0));
// Register PBO
checkCudaErrors(cudaGraphicsGLRegisterBuffer(
&cudaData.pboResource,
glData.pboID,
cudaGraphicsRegisterFlags::cudaGraphicsRegisterFlagsWriteDiscard
));
}
void init(const string& path) {
prepareGlObjects(path.c_str());
initCUDAObjects();
}
~HeightMap() {
checkCudaErrors(cudaGraphicsUnregisterResource(this->cudaData.texResource));
checkCudaErrors(cudaGraphicsUnregisterResource(this->cudaData.pboResource));
if (this->glData.textureID > 0)
glDeleteTextures(1, &this->glData.textureID);
if (this->glData.pboID > 0)
glDeleteBuffers(1, &this->glData.pboID);
}
};
HeightMap hMap;
unsigned int overlayTexId;
struct Particle {
float x, y; // POSITION
float v_x { 0.0f }; // VELOCITY IN DIRECTION X
float v_y { 0.0f }; // VELOCITY IN DIRECTION Y
};
Particle* dLeaders;
Particle* dFollowers;
std::vector<Particle> generateParticles(int n) {
std::vector<Particle> result;
std::uniform_real_distribution<float> dis(0.0, 1.0);
for (int i = 0; i < n; i++) {
result.push_back(Particle{
dis(generator) * hMap.glData.imageWidth,
dis(generator) * hMap.glData.imageHeight,
0.0f, 0.0f
});
}
return result;
}
#pragma region --- CUDA ---
__global__ void clearPBO(unsigned char* pbo, const unsigned int pboWidth, const unsigned int pboHeight) {
unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= pboWidth || ty >= pboHeight) return;
unsigned int pboIdx = ((ty * pboWidth) + tx) * 4;
pbo[pboIdx++] = 0;
pbo[pboIdx++] = 0;
pbo[pboIdx++] = 0;
pbo[pboIdx] = 0;
}
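// Grid-stride loop over the particle array; each particle is splatted as a single RGBA
// pixel (alpha = 255) at its floored (x, y) position in the PBO.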
__global__ void renderParticles(uchar3 color, Particle* particles, int particleCount, unsigned char* pbo, const unsigned int pboWidth, const unsigned int pboHeight) {
unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int jump = blockDim.x * gridDim.x;
while (tx < particleCount) {
Particle p = particles[tx];
unsigned int pboIdx = ((floor(p.y) * pboWidth) + floor(p.x)) * 4;
pbo[pboIdx++] = color.x;
pbo[pboIdx++] = color.y;
pbo[pboIdx++] = color.z;
pbo[pboIdx] = 255;
tx += jump;
}
}
void cudaWorker() {
// Map GL resources
checkCudaErrors(cudaGraphicsMapResources(1, &hMap.cudaData.texResource, 0));
checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&hMap.cudaData.texArrayData, hMap.cudaData.texResource, 0, 0));
// TODO -> move pbo resource to be part of overlay texture
checkCudaErrors(cudaGraphicsMapResources(1, &hMap.cudaData.pboResource, 0));
unsigned char* pboData;
size_t pboSize;
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, hMap.cudaData.pboResource));
{ // CLEAR PBO
dim3 block(TPB_1D, TPB_1D, 1);
dim3 grid((hMap.glData.imageWidth + TPB_1D - 1) / TPB_1D, (hMap.glData.imageHeight + TPB_1D - 1) / TPB_1D, 1);
clearPBO<<<grid, block>>>(pboData, hMap.glData.imageWidth, hMap.glData.imageHeight);
};
{ // PUT PARTICLES INTO PBO
constexpr uchar3 leaderColor = {255, 0, 0};
constexpr uchar3 followerColor = {0, 0, 255};
//TODO -> adjust block and grid sizes
dim3 block(128, 1, 1);
dim3 grid(1, 1, 1);
renderParticles<<<grid, block>>>(leaderColor, dLeaders, settings.leaders, pboData, hMap.glData.imageWidth, hMap.glData.imageHeight);
renderParticles<<<grid, block>>>(followerColor, dFollowers, settings.followers, pboData, hMap.glData.imageWidth, hMap.glData.imageHeight);
};
// TODO -> Run kernel
// Unmap GL Resources
checkCudaErrors(cudaGraphicsUnmapResources(1, &hMap.cudaData.texResource, 0));
checkCudaErrors(cudaGraphicsUnmapResources(1, &hMap.cudaData.pboResource, 0));
// This updates GL texture from PBO
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, hMap.glData.pboID);
glBindTexture(GL_TEXTURE_2D, overlayTexId);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, hMap.glData.imageWidth, hMap.glData.imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
#pragma endregion
#pragma region --- OPEN_GL ---
void my_display() {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
//glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, hMap.glData.textureID);
glBegin(GL_QUADS);
glTexCoord2d(0, 0); glVertex2d(0, 0);
glTexCoord2d(1, 0); glVertex2d( hMap.glData.viewportWidth, 0);
glTexCoord2d(1, 1); glVertex2d( hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glTexCoord2d(0, 1); glVertex2d(0, hMap.glData.viewportHeight);
glEnd();
//glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, overlayTexId);
glBegin(GL_QUADS);
glTexCoord2d(0, 0); glVertex2d(0, 0);
glTexCoord2d(1, 0); glVertex2d( hMap.glData.viewportWidth, 0);
glTexCoord2d(1, 1); glVertex2d( hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glTexCoord2d(0, 1); glVertex2d(0, hMap.glData.viewportHeight);
glEnd();
glDisable(GL_TEXTURE_2D);
glFlush();
glutSwapBuffers();
}
void my_resize(GLsizei w, GLsizei h) {
hMap.glData.viewportWidth = w;
hMap.glData.viewportHeight = h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, hMap.glData.viewportWidth, 0, hMap.glData.viewportHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
void my_idle() {
cudaWorker();
glutPostRedisplay();
}
void initGL(int argc, char** argv) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE);
glutInitWindowSize(hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glutInitWindowPosition(0, 0);
glutSetOption(GLUT_RENDERING_CONTEXT, false ? GLUT_USE_CURRENT_CONTEXT : GLUT_CREATE_NEW_CONTEXT);
glutCreateWindow(0);
char m_windowsTitle[512];
snprintf(m_windowsTitle, 512, "SimpleView | context %s | renderer %s | vendor %s ",
(const char*)glGetString(GL_VERSION),
(const char*)glGetString(GL_RENDERER),
(const char*)glGetString(GL_VENDOR));
glutSetWindowTitle(m_windowsTitle);
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
glutIdleFunc(my_idle);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
// initialize necessary OpenGL extensions
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glShadeModel(GL_SMOOTH);
glViewport(0, 0, hMap.glData.viewportWidth, hMap.glData.viewportHeight);
glFlush();
}
#pragma endregion
void initOverlayTex() {
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &overlayTexId);
glBindTexture(GL_TEXTURE_2D, overlayTexId);
std::vector<GLubyte> emptyData(hMap.glData.imageWidth * hMap.glData.imageHeight * 4, 128);
//WARNING: Only some internal formats are supported by CUDA!!!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, hMap.glData.imageWidth, hMap.glData.imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, &emptyData[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glBindTexture(GL_TEXTURE_2D, 0);
}
int main(int argc, char* argv[]) {
#pragma region initialize
initializeCUDA(deviceProp);
if (argc < 2) {
printf("Please specify path to the configuration path");
return 1;
}
settings = Settings(argv[1]);
initGL(1, argv);
// INITIALIZE HEIGHT MAP
hMap.init(settings.heightMap);
auto a = glGetError();
initOverlayTex();
a = glGetError();
// CREATE LEADERS AND COPY TO DEVICE
auto leaders = generateParticles(settings.leaders);
cudaMalloc((void**)&dLeaders, settings.leaders * sizeof(Particle));
cudaMemcpy(dLeaders, leaders.data(), settings.leaders * sizeof(Particle), cudaMemcpyHostToDevice);
// CREATE FOLLOWERS AND COPY TO DEVICE
auto followers = generateParticles(settings.followers);
cudaMalloc((void**)&dFollowers, settings.followers * sizeof(Particle));
cudaMemcpy(dFollowers, followers.data(), settings.followers * sizeof(Particle), cudaMemcpyHostToDevice);
#pragma endregion
glutMainLoop();
#pragma region clean_up
if (dLeaders) cudaFree(dLeaders);
if (dFollowers) cudaFree(dFollowers);
#pragma endregion
return 0;
}
|
e0145d0608d3a2189fdaef4c49098b6c1c5710b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_gradient_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
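// Computes the four bilinear interpolation weights (w1..w4) and the integer corner indices
// around the sampling point (x, y), clamping to the feature-map border and zeroing the
// weights when the point falls outside the map.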
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
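// Gradient of rotated RoIAlign: each thread handles one pooled output element (n, c, ph, pw).
// Sample points inside the bin are rotated by the RoI angle around the RoI center, and the
// incoming gradient, averaged over the bin's sampling grid, is scattered to the four
// neighbouring input pixels with the bilinear weights via atomic adds.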
template <typename T>
__global__ void RoIAlignRotatedBackward(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
} // namespace
template <>
bool RoIAlignRotatedGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois
hipLaunchKernelGGL(( RoIAlignRotatedBackward<float>)
, dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
dY.size(),
dY.data<float>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
dX->mutable_data<float>(),
R.data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(
RoIAlignRotatedGradient,
RoIAlignRotatedGradientOp<float, CUDAContext>);
} // namespace caffe2
|
e0145d0608d3a2189fdaef4c49098b6c1c5710b4.cu
|
#ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_gradient_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
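// Bilinear interpolation helper for the backward pass: returns the four corner weights and
// indices for sampling point (x, y), clamped to the feature map and zeroed outside it.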
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
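// Backward kernel: one thread per pooled output element; bin sample points are rotated by
// the RoI angle and the averaged gradient is scattered to the four neighbouring input
// pixels with atomic adds.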
template <typename T>
__global__ void RoIAlignRotatedBackward(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
} // namespace
template <>
bool RoIAlignRotatedGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& dY = Input(2); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
dX->ResizeLike(X);
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois
RoIAlignRotatedBackward<float>
<<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
dY.size(),
dY.data<float>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
dX->mutable_data<float>(),
R.data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(
RoIAlignRotatedGradient,
RoIAlignRotatedGradientOp<float, CUDAContext>);
} // namespace caffe2
|
d7255cb50e713e38a18bd7156daccc492d6f9319.hip
|
// !!! This is a file automatically generated by hipify!!!
// includes, system
#include <ctime>
#include <iomanip>
#include <iostream>
#include <fstream>
// includes CUDA
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// includes Thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include "config.h"
#include "Constants.h"
#include "gas_disk.h"
#include "nbody.h"
#include "nbody_exception.h"
#include "ode.h"
#include "options.h"
using namespace std;
static hipError_t HandleError(hipError_t cudaStatus, const char *file, int line)
{
if (hipSuccess != cudaStatus) {
printf( "%s in %s at line %d\n", hipGetErrorString( cudaStatus ), file, line );
return cudaStatus;
}
return cudaStatus;
}
#define HANDLE_ERROR(cudaStatus) (HandleError(cudaStatus, __FILE__, __LINE__))
__global__
void print_gas_disc(gas_disk *gasDisk)
{
printf("eta: %10lf, %10lf\n", gasDisk->eta.x, gasDisk->eta.y);
printf("rho: %10lf, %10lf\n", gasDisk->rho.x, gasDisk->rho.y);
printf("sch: %10lf, %10lf\n", gasDisk->sch.x, gasDisk->sch.y);
printf("tau: %10lf, %10lf\n", gasDisk->tau.x, gasDisk->tau.y);
}
hipError_t unit_test_cpy_gas_disc_to_dev()
{
hipError_t cudaStatus = hipSuccess;
bool succeeded = true;
char func_name[256];
char err_msg[1024];
{
bool failed = false;
strcpy(func_name, "unit_test_cpy_gas_disc_to_dev");
var2_t eta = {2.0e-3, 1.0/2.0 };
var2_t rho = {1.0e-9, -11.0/4.0 }; // g / cm^3
var2_t sch = {5.0e-2, 5.0/4.0 };
var2_t tau = {2.0/3.0, 2.0 };
rho.x *= Constants::GramPerCm3ToSolarPerAu3; // M_sun / AU^3
gas_disk* gasDisk;
gas_disk* d_gasDisk;
gasDisk = new gas_disk(rho, sch, eta, tau);
cout << "gasDisk: " << endl;
cout << *gasDisk;
cudaStatus = HANDLE_ERROR(hipMalloc((void**)&d_gasDisk, sizeof(gas_disk)));
if (cudaStatus != hipSuccess) {
sprintf(err_msg, "\t%30s() function failed at line %d.", func_name, __LINE__);
cerr << err_msg << endl;
failed = true;
}
cudaStatus = HANDLE_ERROR(hipMemcpy(d_gasDisk, gasDisk, sizeof(gas_disk), hipMemcpyHostToDevice ));
if (cudaStatus != hipSuccess) {
sprintf(err_msg, "\t%30s() function failed at line %d.", func_name, __LINE__);
cerr << err_msg << endl;
failed = true;
}
hipLaunchKernelGGL(( print_gas_disc), dim3(1),dim3(1), 0, 0, d_gasDisk);
cudaStatus = HANDLE_ERROR(hipGetLastError());
if (cudaStatus != hipSuccess) {
sprintf(err_msg, "\t%30s() function failed at line %d.", func_name, __LINE__);
cerr << err_msg << endl;
failed = true;
}
hipFree(d_gasDisk);
delete gasDisk;
}
return cudaStatus;
}
// a = a + b
__global__
void add_two_vector(int_t n, var_t *a, const var_t *b)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (n > tid) {
a[tid] += b[tid];
}
}
hipError_t unit_test_transform_plus()
{
hipError_t cudaStatus = hipSuccess;
bool succeeded = true;
char func_name[256];
char err_msg[1024];
{
bool failed = false;
strcpy(func_name, "add_two_vector");
h_var_t h_acce;
h_var_t h_acceGasDrag;
h_acce.resize(10 * 4);
h_acceGasDrag.resize(3 * 4);
for (int i = 0; i < 10*4; i++ ) {
h_acce[i] = 0.0;
}
for (int i = 0; i < 3*4; i++ ) {
h_acceGasDrag[i] = 1.0;
}
d_var_t acce = h_acce;
d_var_t acceGasDrag = h_acceGasDrag;
int_t n = acceGasDrag.size();
// 1 star + 1 gp + 3 rp
int offset = 5 * 4;
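// Skip the first 5 bodies (4 components each); the drag accelerations are added to the following 3 bodies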
hipLaunchKernelGGL(( add_two_vector), dim3(1), dim3(n), 0, 0, n, (var_t*)(acce.data().get() + offset), (var_t*)acceGasDrag.data().get());
cudaStatus = HANDLE_ERROR(hipGetLastError());
if (cudaStatus != hipSuccess) {
sprintf(err_msg, "\t%30s() function failed at line %d.", func_name, __LINE__);
cerr << err_msg << endl;
failed = true;
}
h_acce = acce;
for (int i = 0; i < 10; i++ ) {
int idx = 4*i;
printf("h_acce[%d] = %10lf, %10lf, %10lf, %10lf\n", idx, h_acce[idx], h_acce[idx+1], h_acce[idx+2], h_acce[idx+3]);
}
}
return cudaStatus;
}
int main(int argc, const char** argv)
{
hipError_t cudaStatus = hipSuccess;
int result = 0;
char func_name[256];
char err_msg[1024];
{
strcpy(func_name, "unit_test_cpy_gas_disc_to_dev");
cudaStatus = unit_test_cpy_gas_disc_to_dev();
if (hipSuccess == cudaStatus) {
sprintf(err_msg, "The unit test(s) of the %s() function passed.", func_name);
cout << endl << err_msg << endl;
}
else {
sprintf(err_msg, "The unit test(s) of the %s() function failed.", func_name);
cout << endl << err_msg << endl;
}
}
{
strcpy(func_name, "unit_test_transform_plus");
cudaStatus = unit_test_transform_plus();
if (hipSuccess == cudaStatus) {
sprintf(err_msg, "The unit test(s) of the %s() function passed.", func_name);
cout << endl << err_msg << endl;
}
else {
sprintf(err_msg, "The unit test(s) of the %s() function failed.", func_name);
cout << endl << err_msg << endl;
}
}
return result;
}
|
d7255cb50e713e38a18bd7156daccc492d6f9319.cu
|
// includes, system
#include <ctime>
#include <iomanip>
#include <iostream>
#include <fstream>
// includes CUDA
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// includes Thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include "config.h"
#include "Constants.h"
#include "gas_disk.h"
#include "nbody.h"
#include "nbody_exception.h"
#include "ode.h"
#include "options.h"
using namespace std;
static cudaError_t HandleError(cudaError_t cudaStatus, const char *file, int line)
{
if (cudaSuccess != cudaStatus) {
printf( "%s in %s at line %d\n", cudaGetErrorString( cudaStatus ), file, line );
return cudaStatus;
}
return cudaStatus;
}
#define HANDLE_ERROR(cudaStatus) (HandleError(cudaStatus, __FILE__, __LINE__))
__global__
void print_gas_disc(gas_disk *gasDisk)
{
printf("eta: %10lf, %10lf\n", gasDisk->eta.x, gasDisk->eta.y);
printf("rho: %10lf, %10lf\n", gasDisk->rho.x, gasDisk->rho.y);
printf("sch: %10lf, %10lf\n", gasDisk->sch.x, gasDisk->sch.y);
printf("tau: %10lf, %10lf\n", gasDisk->tau.x, gasDisk->tau.y);
}
cudaError_t unit_test_cpy_gas_disc_to_dev()
{
cudaError_t cudaStatus = cudaSuccess;
bool succeeded = true;
char func_name[256];
char err_msg[1024];
{
bool failed = false;
strcpy(func_name, "unit_test_cpy_gas_disc_to_dev");
var2_t eta = {2.0e-3, 1.0/2.0 };
var2_t rho = {1.0e-9, -11.0/4.0 }; // g / cm^3
var2_t sch = {5.0e-2, 5.0/4.0 };
var2_t tau = {2.0/3.0, 2.0 };
rho.x *= Constants::GramPerCm3ToSolarPerAu3; // M_sun / AU^3
gas_disk* gasDisk;
gas_disk* d_gasDisk;
gasDisk = new gas_disk(rho, sch, eta, tau);
cout << "gasDisk: " << endl;
cout << *gasDisk;
cudaStatus = HANDLE_ERROR(cudaMalloc((void**)&d_gasDisk, sizeof(gas_disk)));
if (cudaStatus != cudaSuccess) {
sprintf(err_msg, "\t%30s() function failed at line %d.", func_name, __LINE__);
cerr << err_msg << endl;
failed = true;
}
cudaStatus = HANDLE_ERROR(cudaMemcpy(d_gasDisk, gasDisk, sizeof(gas_disk), cudaMemcpyHostToDevice ));
if (cudaStatus != cudaSuccess) {
sprintf(err_msg, "\t%30s() function failed at line %d.", func_name, __LINE__);
cerr << err_msg << endl;
failed = true;
}
print_gas_disc<<<1,1>>>(d_gasDisk);
cudaStatus = HANDLE_ERROR(cudaGetLastError());
if (cudaStatus != cudaSuccess) {
sprintf(err_msg, "\t%30s() function failed at line %d.", func_name, __LINE__);
cerr << err_msg << endl;
failed = true;
}
cudaFree(d_gasDisk);
delete gasDisk;
}
return cudaStatus;
}
// a = a + b
__global__
void add_two_vector(int_t n, var_t *a, const var_t *b)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (n > tid) {
a[tid] += b[tid];
}
}
cudaError_t unit_test_transform_plus()
{
cudaError_t cudaStatus = cudaSuccess;
bool succeeded = true;
char func_name[256];
char err_msg[1024];
{
bool failed = false;
strcpy(func_name, "add_two_vector");
h_var_t h_acce;
h_var_t h_acceGasDrag;
h_acce.resize(10 * 4);
h_acceGasDrag.resize(3 * 4);
for (int i = 0; i < 10*4; i++ ) {
h_acce[i] = 0.0;
}
for (int i = 0; i < 3*4; i++ ) {
h_acceGasDrag[i] = 1.0;
}
d_var_t acce = h_acce;
d_var_t acceGasDrag = h_acceGasDrag;
int_t n = acceGasDrag.size();
// 1 star + 1 gp + 3 rp
int offset = 5 * 4;
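// Skip the first 5 bodies (4 components each); the drag accelerations are added to the following 3 bodies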
add_two_vector<<<1, n>>>(n, (var_t*)(acce.data().get() + offset), (var_t*)acceGasDrag.data().get());
cudaStatus = HANDLE_ERROR(cudaGetLastError());
if (cudaStatus != cudaSuccess) {
sprintf(err_msg, "\t%30s() function failed at line %d.", func_name, __LINE__);
cerr << err_msg << endl;
failed = true;
}
h_acce = acce;
for (int i = 0; i < 10; i++ ) {
int idx = 4*i;
printf("h_acce[%d] = %10lf, %10lf, %10lf, %10lf\n", idx, h_acce[idx], h_acce[idx+1], h_acce[idx+2], h_acce[idx+3]);
}
}
return cudaStatus;
}
int main(int argc, const char** argv)
{
cudaError_t cudaStatus = cudaSuccess;
int result = 0;
char func_name[256];
char err_msg[1024];
{
strcpy(func_name, "unit_test_cpy_gas_disc_to_dev");
cudaStatus = unit_test_cpy_gas_disc_to_dev();
if (cudaSuccess == cudaStatus) {
sprintf(err_msg, "The unit test(s) of the %s() function passed.", func_name);
cout << endl << err_msg << endl;
}
else {
sprintf(err_msg, "The unit test(s) of the %s() function failed.", func_name);
cout << endl << err_msg << endl;
}
}
{
strcpy(func_name, "unit_test_transform_plus");
cudaStatus = unit_test_transform_plus();
if (cudaSuccess == cudaStatus) {
sprintf(err_msg, "The unit test(s) of the %s() function passed.", func_name);
cout << endl << err_msg << endl;
}
else {
sprintf(err_msg, "The unit test(s) of the %s() function failed.", func_name);
cout << endl << err_msg << endl;
}
}
return result;
}
|
618cd82d840f6e3511af9d27404ef174779d54e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit test for the PipelineTmaAsync class used in a WarpSpecialized Persistent loop
*/
#define KERNEL_DBG_TRACE false
#include "../common/cutlass_unit_test.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/cluster_launch.hpp>
#include "cutlass/core_io.h"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "testbed.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/barrier.h"
#include "cutlass/arch/reg_reconfig.h"
using namespace cute;
using namespace cutlass;
//////////////////// KERNEL /////////////////////////
template <uint32_t Stages, typename ClusterShape, typename PingPongBarrier>
struct SharedStorage
{
typename cutlass::PipelineTmaAsync<Stages, ClusterShape>::SharedStorage pipeline_storage;
typename PingPongBarrier::SharedStorage pingpong_storage;
};
template <typename ClusterShape, uint32_t Stages>
struct CollectiveSimulation {
using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, ClusterShape>;
using PipelineState = typename cutlass::PipelineState<Stages>;
CUTLASS_DEVICE
static void
dma_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe,
uint32_t const num_iterations) {
uint32_t const per_cta_bytes = sizeof(uint32_t);
int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0);
int lane_predicate = cute::elect_one_sync();
if (warp_idx_in_warpgroup==0 && lane_predicate) {
int tma_k_prologue = min(Stages, num_iterations);
// Simulating Prologue TMA Loads
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < tma_k_prologue; ++i) {
pipeline.producer_acquire(tile_start_state_pipe);
// Simulating cp.async.bulk.tensor behavior
pipeline.producer_commit(tile_start_state_pipe, per_cta_bytes);
++tile_start_state_pipe;
}
int tma_k_iter = num_iterations - tma_k_prologue;
PipelineState wr_pipe = tile_start_state_pipe;
// Simulating Mainloop TMA Loads
CUTE_NO_UNROLL
for ( ; tma_k_iter > 0; --tma_k_iter){
pipeline.producer_acquire(wr_pipe);
// Simulating cp.async.bulk.tensor behavior
pipeline.producer_commit(wr_pipe, per_cta_bytes);
// Advance write stage
++wr_pipe;
}
}
}
CUTLASS_DEVICE
static void
math_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe,
uint32_t const num_iterations, int* data_ptr) {
PipelineState rd_pipe = tile_start_state_pipe;
PipelineState release_pipe = rd_pipe;
// simulates accumulators + extra reg. pressure
int arr[168];
// Init Shared Memory read stages & PhaseBit
static constexpr uint32_t K_PIPE_MMAS = 1;
static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight");
// Total number of gemm iterations
auto gemm_k_iterations = num_iterations;
// Simulating Prologue MMAs
int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations);
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < mma_k_prologue; ++iter) {
pipeline.consumer_wait(rd_pipe);
warpgroup_arrive();
// GMMA would typically happen here
++rd_pipe;
}
gemm_k_iterations -= mma_k_prologue;
// Simulating Mainloop MMAs
CUTLASS_PRAGMA_NO_UNROLL
for ( ; gemm_k_iterations > 0; --gemm_k_iterations) {
/// Wait on the rd_pipe stage / phase
pipeline.consumer_wait(rd_pipe);
warpgroup_arrive();
// GMMA would typically happen here
// Dummy op - which will never happen
// But simulates high register usage.
CUTE_UNROLL
for(int i = 0; i < 168; ++i){
if (threadIdx.x > 384){
arr[i] += data_ptr[i];
}
}
pipeline.consumer_release(release_pipe);
// Advance stages
++rd_pipe;
++release_pipe;
}
// Dummy op - which will never happen
CUTE_UNROLL
for(int i = 0; i < 168; ++i){
if (threadIdx.x > 384){
data_ptr[i] = arr[i];
}
}
// Tail Loop
for (int i = 0; i < K_PIPE_MMAS; ++i){
pipeline.consumer_release(release_pipe);
++release_pipe;
}
}
};
struct KernelParams
{
uint32_t num_iterations;
int tiles_per_cluster;
int* data_ptr;
};
// Goal of this kernel is to complete deadlock-free
template <typename ClusterShape, uint32_t Stages>
__launch_bounds__(384, 1)
__global__ static
void pipeline_device(KernelParams params)
{
extern __shared__ char shared_memory[];
using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, ClusterShape>;
using PipelineState = typename cutlass::PipelineState<Stages>;
/* One for Mainloop and one for Epilogue */
constexpr int StagesPerMathWarpGroup = 2;
constexpr int MathWarpGroupCountPersistent = 2;
using PingPongBarrier = typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>;
using SharedStorage = SharedStorage<Stages, ClusterShape, PingPongBarrier>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id
int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0);
int warp_group_thread_idx = threadIdx.x % NumThreadsPerWarpGroup;
dim3 block_id_in_cluster = cute::block_id_in_cluster();
auto cluster_shape = ClusterShape{};
// #Producers = #RowsInCluster + #ColsInCluster - 1
uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1;
uint32_t const TmaTransactionBytes = static_cast<uint32_t>(sizeof(uint32_t) * NumProducers);
// mbarrier.init
typename MainloopPipeline::Params pipeline_params;
pipeline_params.transaction_bytes = TmaTransactionBytes;
if (warp_group_idx == 0) {
pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
else {
pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
pipeline_params.is_leader = warp_group_thread_idx == 0;
pipeline_params.num_consumers = NumThreadsPerWarpGroup;
MainloopPipeline pipeline(shared_storage.pipeline_storage, pipeline_params);
PipelineState tile_start_state_pipe;
int tiles_per_cluster = params.tiles_per_cluster;
/* Offset pipeline start state for Math WG 2 */
if (warp_group_idx == 2) {
// Update pipeline state for next persistent tile
tile_start_state_pipe.advance(params.num_iterations);
tiles_per_cluster--;
}
typename PingPongBarrier::Params pingpong_params;
pingpong_params.group_id = warp_group_idx - 1; // Since DMA Warp Group Idx 0 will not participate
pingpong_params.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group
PingPongBarrier math_wg_barrier(shared_storage.pingpong_storage, pingpong_params);
__syncthreads();
// Ensure All CTAs in Cluster have completed init before issuing commits
cute::cluster_arrive_relaxed();
cute::cluster_wait();
// Producer/DMA WarpGroup
if (warp_group_idx == 0) {
cutlass::arch::warpgroup_reg_dealloc<40>();
// For the DMA (prologue) - we start with an opposite phase - since we skip all waits
// i.e., we know that the buffer is indeed empty
PipelineState tile_prologue_state_pipe = make_producer_start_state<MainloopPipeline>();
while (tiles_per_cluster > 0) {
CollectiveSimulation<ClusterShape,Stages>::dma_wg_simulation(pipeline, tile_prologue_state_pipe, params.num_iterations);
// Update pipeline state for next persistent tile
tile_prologue_state_pipe.advance(params.num_iterations);
tiles_per_cluster--;
}
}
// Math WarpGroups
if(warp_group_idx == 1 || warp_group_idx == 2) {
cutlass::arch::warpgroup_reg_alloc<232>();
while (tiles_per_cluster > 0) {
// MMA
math_wg_barrier.wait();
CollectiveSimulation<ClusterShape,Stages>::math_wg_simulation(pipeline, tile_start_state_pipe, params.num_iterations, params.data_ptr);
math_wg_barrier.arrive();
// Epilogue
math_wg_barrier.wait();
// Simulates long running stage
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
__nanosleep(100000);
#endif
math_wg_barrier.arrive();
// Update pipeline state for next persistent tile
tile_start_state_pipe.advance(params.num_iterations * 2);
tiles_per_cluster -= 2;
}
}
// Makes sure remote SMEM doesn't get destroyed
cute::cluster_arrive_relaxed();
cute::cluster_wait();
}
/////////////////////////////////////////////////////
/// Device NT GMMA + TMA specialized
template<uint32_t Stages_, typename ClusterShape_>
struct PipelineTest {
//
// Data members
//
static constexpr uint32_t Stages = Stages_;
static constexpr uint32_t kBlockSize = 128 * 3;
using ClusterShape = ClusterShape_;
//
// Methods
//
// Run CuTe GEMM kernel
hipError_t run(uint32_t const kNumIters,
hipStream_t stream = 0) {
float elapsed_ms = 0.0f;
// Pipeline (multistage pipeline)
auto num_stages = Int<Stages>{};
auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{};
//
// Configure and launch
//
int iterations = 1;
hipEvent_t events[2];
hipError_t result;
for (hipEvent_t & event : events) {
result = hipEventCreate(&event);
if (result != hipSuccess) {
std::cerr << "Error: Failed to create event.";
return result;
}
}
result = hipEventRecord(events[0]);
if (result != hipSuccess) {
std::cerr << "Error: Failed to record start event.";
return result;
}
for (int iter = 0; iter < iterations; ++iter) {
constexpr int StagesPerMathWarpGroup = 2;
constexpr int MathWarpGroupCountPersistent = 2;
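// Dynamic shared memory must cover both the pipeline stage storage and the ping-pong barrier storage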
int smem_size = int(sizeof(SharedStorage<Stages, decltype(cluster_shape),
typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>>));
result = hipFuncSetAttribute(
pipeline_device<decltype(cluster_shape), Stages>,
hipFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
// Launch a single Cluster, with kBlockSize threads per CTA
dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimBlock(kBlockSize,1,1);
int tiles_per_cluster = (kNumIters % 10) + 1;
printf("Persistent version: Tiles per Cluster = %d\n", tiles_per_cluster);
const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>;
KernelParams params{kNumIters, tiles_per_cluster, nullptr};
void *kernel_params[] = {¶ms};
cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);
}
result = hipEventRecord(events[1]);
if (result != hipSuccess) {
std::cerr << "Error: Failed to record stop event.";
return result;
}
result = hipDeviceSynchronize();
if (result != hipSuccess) {
std::cerr << "Error: hipDeviceSynchronize() failed" << std::endl;
return result;
}
result = hipEventElapsedTime(&elapsed_ms, events[0], events[1]);
if (result != hipSuccess) {
std::cerr << "Failed to create event.";
return result;
}
for (hipEvent_t & event : events) {
(void)hipEventDestroy(event);
}
return hipSuccess;
}
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage10) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 10;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
#endif
|
618cd82d840f6e3511af9d27404ef174779d54e7.cu
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit test for the PipelineTmaAsync class used in a WarpSpecialized Persistent loop
*/
#define KERNEL_DBG_TRACE false
#include "../common/cutlass_unit_test.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/arch/cluster_sm90.hpp>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/cluster_launch.hpp>
#include "cutlass/core_io.h"
#include "cutlass/util/print_error.hpp"
#include "cutlass/util/GPU_Clock.hpp"
#include "testbed.h"
#include "cutlass/pipeline/pipeline.hpp"
#include "cutlass/arch/barrier.h"
#include "cute/arch/cluster_sm90.hpp"
#include "cutlass/arch/barrier.h"
#include "cutlass/arch/reg_reconfig.h"
using namespace cute;
using namespace cutlass;
//////////////////// KERNEL /////////////////////////
template <uint32_t Stages, typename ClusterShape, typename PingPongBarrier>
struct SharedStorage
{
typename cutlass::PipelineTmaAsync<Stages, ClusterShape>::SharedStorage pipeline_storage;
typename PingPongBarrier::SharedStorage pingpong_storage;
};
template <typename ClusterShape, uint32_t Stages>
struct CollectiveSimulation {
using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, ClusterShape>;
using PipelineState = typename cutlass::PipelineState<Stages>;
CUTLASS_DEVICE
static void
dma_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe,
uint32_t const num_iterations) {
uint32_t const per_cta_bytes = sizeof(uint32_t);
int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0);
int lane_predicate = cute::elect_one_sync();
if (warp_idx_in_warpgroup==0 && lane_predicate) {
int tma_k_prologue = min(Stages, num_iterations);
// Simulating Prologue TMA Loads
CUTLASS_PRAGMA_UNROLL
for(int i = 0; i < tma_k_prologue; ++i) {
pipeline.producer_acquire(tile_start_state_pipe);
// Simulating cp.async.bulk.tensor behavior
pipeline.producer_commit(tile_start_state_pipe, per_cta_bytes);
++tile_start_state_pipe;
}
int tma_k_iter = num_iterations - tma_k_prologue;
PipelineState wr_pipe = tile_start_state_pipe;
// Simulating Mainloop TMA Loads
CUTE_NO_UNROLL
for ( ; tma_k_iter > 0; --tma_k_iter){
pipeline.producer_acquire(wr_pipe);
// Simulating cp.async.bulk.tensor behavior
pipeline.producer_commit(wr_pipe, per_cta_bytes);
// Advance write stage
++wr_pipe;
}
}
}
CUTLASS_DEVICE
static void
math_wg_simulation(MainloopPipeline pipeline, PipelineState tile_start_state_pipe,
uint32_t const num_iterations, int* data_ptr) {
PipelineState rd_pipe = tile_start_state_pipe;
PipelineState release_pipe = rd_pipe;
// simulates accumulators + extra reg. pressure
int arr[168];
// Init Shared Memory read stages & PhaseBit
static constexpr uint32_t K_PIPE_MMAS = 1;
static_assert( K_PIPE_MMAS < Stages, "ERROR : Too many MMAs in flight");
// Total number of gemm iterations
auto gemm_k_iterations = num_iterations;
// Simulating Prologue MMAs
int mma_k_prologue = min(K_PIPE_MMAS, gemm_k_iterations);
CUTLASS_PRAGMA_UNROLL
for (int iter = 0; iter < mma_k_prologue; ++iter) {
pipeline.consumer_wait(rd_pipe);
warpgroup_arrive();
// GMMA would typically happen here
++rd_pipe;
}
gemm_k_iterations -= mma_k_prologue;
// Simulating Mainloop MMAs
CUTLASS_PRAGMA_NO_UNROLL
for ( ; gemm_k_iterations > 0; --gemm_k_iterations) {
/// Wait on the rd_pipe stage / phase
pipeline.consumer_wait(rd_pipe);
warpgroup_arrive();
// GMMA would typically happen here
// Dummy op - which will never happen
// But simulates high register usage.
CUTE_UNROLL
for(int i = 0; i < 168; ++i){
if (threadIdx.x > 384){
arr[i] += data_ptr[i];
}
}
pipeline.consumer_release(release_pipe);
// Advance stages
++rd_pipe;
++release_pipe;
}
// Dummy op - which will never happen
CUTE_UNROLL
for(int i = 0; i < 168; ++i){
if (threadIdx.x > 384){
data_ptr[i] = arr[i];
}
}
// Tail Loop
for (int i = 0; i < K_PIPE_MMAS; ++i){
pipeline.consumer_release(release_pipe);
++release_pipe;
}
}
};
struct KernelParams
{
uint32_t num_iterations;
int tiles_per_cluster;
int* data_ptr;
};
// Goal of this kernel is to complete deadlock-free
template <typename ClusterShape, uint32_t Stages>
__launch_bounds__(384, 1)
__global__ static
void pipeline_device(KernelParams params)
{
extern __shared__ char shared_memory[];
using MainloopPipeline = typename cutlass::PipelineTmaAsync<Stages, ClusterShape>;
using PipelineState = typename cutlass::PipelineState<Stages>;
/* One for Mainloop and one for Epilogue */
constexpr int StagesPerMathWarpGroup = 2;
constexpr int MathWarpGroupCountPersistent = 2;
using PingPongBarrier = typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>;
using SharedStorage = SharedStorage<Stages, ClusterShape, PingPongBarrier>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id
int warp_group_idx = __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0);
int warp_group_thread_idx = threadIdx.x % NumThreadsPerWarpGroup;
dim3 block_id_in_cluster = cute::block_id_in_cluster();
auto cluster_shape = ClusterShape{};
// #Producers = #RowsInCluster + #ColsInCluster - 1
uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1;
uint32_t const TmaTransactionBytes = static_cast<uint32_t>(sizeof(uint32_t) * NumProducers);
// mbarrier.init
typename MainloopPipeline::Params pipeline_params;
pipeline_params.transaction_bytes = TmaTransactionBytes;
if (warp_group_idx == 0) {
pipeline_params.role = MainloopPipeline::ThreadCategory::Producer;
}
else {
pipeline_params.role = MainloopPipeline::ThreadCategory::Consumer;
}
pipeline_params.is_leader = warp_group_thread_idx == 0;
pipeline_params.num_consumers = NumThreadsPerWarpGroup;
MainloopPipeline pipeline(shared_storage.pipeline_storage, pipeline_params);
PipelineState tile_start_state_pipe;
int tiles_per_cluster = params.tiles_per_cluster;
/* Offset pipeline start state for Math WG 2 */
if (warp_group_idx == 2) {
// Update pipeline state for next persistent tile
tile_start_state_pipe.advance(params.num_iterations);
tiles_per_cluster--;
}
typename PingPongBarrier::Params pingpong_params;
pingpong_params.group_id = warp_group_idx - 1; // Since DMA Warp Group Idx 0 will not participate
pingpong_params.group_size = NumThreadsPerWarpGroup; // Number of threads / participants in a group
PingPongBarrier math_wg_barrier(shared_storage.pingpong_storage, pingpong_params);
__syncthreads();
// Ensure All CTAs in Cluster have completed init before issuing commits
cute::cluster_arrive_relaxed();
cute::cluster_wait();
// Producer/DMA WarpGroup
if (warp_group_idx == 0) {
cutlass::arch::warpgroup_reg_dealloc<40>();
// For the DMA (prologue) - we start with an opposite phase - since we skip all waits
// i.e., we know that the buffer is indeed empty
PipelineState tile_prologue_state_pipe = make_producer_start_state<MainloopPipeline>();
while (tiles_per_cluster > 0) {
CollectiveSimulation<ClusterShape,Stages>::dma_wg_simulation(pipeline, tile_prologue_state_pipe, params.num_iterations);
// Update pipeline state for next persistent tile
tile_prologue_state_pipe.advance(params.num_iterations);
tiles_per_cluster--;
}
}
// Math WarpGroups
if(warp_group_idx == 1 || warp_group_idx == 2) {
cutlass::arch::warpgroup_reg_alloc<232>();
while (tiles_per_cluster > 0) {
// MMA
math_wg_barrier.wait();
CollectiveSimulation<ClusterShape,Stages>::math_wg_simulation(pipeline, tile_start_state_pipe, params.num_iterations, params.data_ptr);
math_wg_barrier.arrive();
// Epilogue
math_wg_barrier.wait();
// Simulates long running stage
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
__nanosleep(100000);
#endif
math_wg_barrier.arrive();
// Update pipeline state for next persistent tile
tile_start_state_pipe.advance(params.num_iterations * 2);
tiles_per_cluster -= 2;
}
}
// Makes sure remote SMEM doesn't get destroyed
cute::cluster_arrive_relaxed();
cute::cluster_wait();
}
/////////////////////////////////////////////////////
/// Device NT GMMA + TMA specialized
template<uint32_t Stages_, typename ClusterShape_>
struct PipelineTest {
//
// Data members
//
static constexpr uint32_t Stages = Stages_;
static constexpr uint32_t kBlockSize = 128 * 3;
using ClusterShape = ClusterShape_;
//
// Methods
//
// Run CuTe GEMM kernel
cudaError_t run(uint32_t const kNumIters,
cudaStream_t stream = 0) {
float elapsed_ms = 0.0f;
// Pipeline (multistage pipeline)
auto num_stages = Int<Stages>{};
auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{};
//
// Configure and launch
//
int iterations = 1;
cudaEvent_t events[2];
cudaError_t result;
for (cudaEvent_t & event : events) {
result = cudaEventCreate(&event);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to create event.";
return result;
}
}
result = cudaEventRecord(events[0]);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to record start event.";
return result;
}
for (int iter = 0; iter < iterations; ++iter) {
constexpr int StagesPerMathWarpGroup = 2;
constexpr int MathWarpGroupCountPersistent = 2;
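// Dynamic shared memory must cover both the pipeline stage storage and the ping-pong barrier storage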
int smem_size = int(sizeof(SharedStorage<Stages, decltype(cluster_shape),
typename cutlass::OrderedSequenceBarrier<StagesPerMathWarpGroup, MathWarpGroupCountPersistent>>));
result = cudaFuncSetAttribute(
pipeline_device<decltype(cluster_shape), Stages>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
smem_size);
// Launch a single Cluster, with kBlockSize threads per CTA
dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1);
dim3 dimBlock(kBlockSize,1,1);
int tiles_per_cluster = (kNumIters % 10) + 1;
printf("Persistent version: Tiles per Cluster = %d\n", tiles_per_cluster);
const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>;
KernelParams params{kNumIters, tiles_per_cluster, nullptr};
void *kernel_params[] = {¶ms};
cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params);
}
result = cudaEventRecord(events[1]);
if (result != cudaSuccess) {
std::cerr << "Error: Failed to record stop event.";
return result;
}
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl;
return result;
}
result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]);
if (result != cudaSuccess) {
std::cerr << "Failed to create event.";
return result;
}
for (cudaEvent_t & event : events) {
(void)cudaEventDestroy(event);
}
return cudaSuccess;
}
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x1_Stage10) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>;
static constexpr uint32_t Stages = 10;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage5) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 5;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x1_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster1x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster2x4_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage2) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 2;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
TEST(SM90_Verify_PipelineTmaAsync_WS_Persistent, Cluster4x2_Stage7) {
Options options;
using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>;
static constexpr uint32_t Stages = 7;
using Test = PipelineTest<Stages, ClusterShape>;
Testbed<Test> testbed(options);
EXPECT_TRUE(testbed.verification());
}
#endif
|
419c56f6263c13d20710e768a40954f66242a248.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transpose.cuh"
__global__ void Transpose(const float* __restrict__ source, float* destination, size_t width, size_t height) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
size_t size = width * height;
int blockIdx_x, blockIdx_y;
// do diagonal reordering
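// (diagonal reordering spreads concurrently active blocks across memory partitions, which helps avoid partition camping)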
if (width == height)
{
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
}
else
{
int bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x
// and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
// Let's read all the tile rows. Remember that each block reads its TILE_DIM x TILE_DIM tile in chunks of BLOCK_ROWS rows
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
{
if (xIndex < width && yIndex < height)
tile[threadIdx.y + i][threadIdx.x] = source[index_in + i * width];
}
cg::sync(cta);
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
{
if (xIndex < height && yIndex < width)
destination[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
|
419c56f6263c13d20710e768a40954f66242a248.cu
|
#include "transpose.cuh"
__global__ void Transpose(const float* __restrict__ source, float* destination, size_t width, size_t height) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
size_t size = width * height;
int blockIdx_x, blockIdx_y;
// do diagonal reordering
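// (diagonal reordering spreads concurrently active blocks across memory partitions, which helps avoid partition camping)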
if (width == height)
{
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
}
else
{
int bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x
// and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
// Let's read all the tile rows. Remember that each block reads its TILE_DIM x TILE_DIM tile in chunks of BLOCK_ROWS rows
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
{
if (xIndex < width && yIndex < height)
tile[threadIdx.y + i][threadIdx.x] = source[index_in + i * width];
}
cg::sync(cta);
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
{
if (xIndex < height && yIndex < width)
destination[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
|