hip_filename (string length 5–84) | hip_content (string length 79–9.69M) | cuda_filename (string length 4–83) | cuda_content (string length 19–9.69M)
---|---|---|---|
32bb8b13b2fb1a271794f999617039a797df457b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <libInterpolate\cuInterpolate.h>
#include <libInterpolate\interval_lib.h>
#include <libInterpolate\cuInterval_kernel.cuh>
#include <libInterpolate\spBaseLag.h>
#include <helper_cuda.h>
#include <time.h>
//#include <thrust\host_vector.h> // may cause NVCC compiler errors
//#include <thrust\transform.h>
// This function only optimizes the code around the kernel launch; it does not handle memory allocation.
// T: the table dimension
// M: the malloc strategy
// @_table: the interpolation table
// @_posIn: vector of points to interpolate
// @_DataOut: vector of interpolated values
template< unsigned int T>
float cuInterpolate(HBXDef::cuTable<T>* _table,
HBXDef::cuLocation<T>* _posIn,
HBXDef::UserCalPrec* _DataOut,
unsigned int N)
{
using namespace HBXDef;
using namespace HBXDef::HbxCuDef;
checkCudaErrors(hipSetDevice(0));
float elapsedTime; // elapsed time
// check the size of the table to control the shared memory size
if ( _table->size() < 1024*16 )
{
//prefer L1 (32 KB cache), since temporary variables can be kept in L1
hipFuncSetCacheConfig(cuInter_kernel<T>, hipFuncCachePreferL1);
}
else if( _table->size()< 1024* 32 && _table->size() > 1024 * 16)
{
//32 KB / 32 KB split, since temporary variables can be kept in L1
hipFuncSetCacheConfig(cuInter_kernel<T>, hipFuncCachePreferEqual);
}
else if (_table->size()< 1024 * 48 && _table->size() > 1024 * 32)
{
//16 KB L1 / 48 KB shared split, since temporary variables can be kept in L1
hipFuncSetCacheConfig(cuInter_kernel<T>, hipFuncCachePreferShared);
}
// make sure the stack has enough space for non-inlined functions
// hipDeviceSetLimit(hipLimitStackSize, 8192);
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start, 0)); // start timing
// kernel launch, important
cuInter_kernel<T> << < GRIDSIZE, BLOCKSIZE, MAX_SHARED, 0 >> > (_table, _posIn, _DataOut, N);
// cuInter_TEST<T> << < GRIDSIZE, BLOCKSIZE, MAX_SHARED, 0 >> > (_table, _posIn, _DataOut);
hipEventRecord(stop, 0); // stop timing
hipDeviceSynchronize(); // thread synchronization
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop); // get the elapsed time
// std::cout << "Kernel execution time: " << elapsedTime << std::endl;
return elapsedTime;
};
//CPU verification routine for the CUDA interpolation
template< unsigned int T>
float Interpolate_cpu(HBXDef::cuTable<T>* _CuTable,
HBXDef::cuLocation<T>* h_loc,
HBXDef::UserCalPrec* h_result,
unsigned int _num)
{
//call sp's CPU-side interpolation class here
baselag<T>* thelag = new baselag<T>(_CuTable, 0);
clock_t start, end;
double duration;
start = clock();
float rsum = 0.0;
float err = 0.0;
for (unsigned int i = 0; i < _num; i++)
{
float diff = 0;
h_result[i] = thelag->get(h_loc[i].m_Loc);
}
end = clock();
duration = (double)(end - start) / CLOCKS_PER_SEC;
std::cout << ":" << duration << std::endl;
return duration;
}
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
|
32bb8b13b2fb1a271794f999617039a797df457b.cu
|
#include <libInterpolate\cuInterpolate.h>
#include <libInterpolate\interval_lib.h>
#include <libInterpolate\cuInterval_kernel.cuh>
#include <libInterpolate\spBaseLag.h>
#include <helper_cuda.h>
#include <time.h>
//#include <thrust\host_vector.h> // may cause NVCC compiler errors
//#include <thrust\transform.h>
// This function only optimizes the code around the kernel launch; it does not handle memory allocation.
// T: the table dimension
// M: the malloc strategy
// @_table: the interpolation table
// @_posIn: vector of points to interpolate
// @_DataOut: vector of interpolated values
template< unsigned int T>
float cuInterpolate(HBXDef::cuTable<T>* _table,
HBXDef::cuLocation<T>* _posIn,
HBXDef::UserCalPrec* _DataOut,
unsigned int N)
{
using namespace HBXDef;
using namespace HBXDef::HbxCuDef;
checkCudaErrors(cudaSetDevice(0));
float elapsedTime; // elapsed time
// check the size of the table to control the shared memory size
if ( _table->size() < 1024*16 )
{
//prefer L1 (32 KB cache), since temporary variables can be kept in L1
cudaFuncSetCacheConfig(cuInter_kernel<T>, cudaFuncCachePreferL1);
}
else if( _table->size()< 1024* 32 && _table->size() > 1024 * 16)
{
//32 KB / 32 KB split, since temporary variables can be kept in L1
cudaFuncSetCacheConfig(cuInter_kernel<T>, cudaFuncCachePreferEqual);
}
else if (_table->size()< 1024 * 48 && _table->size() > 1024 * 32)
{
//16 KB L1 / 48 KB shared split, since temporary variables can be kept in L1
cudaFuncSetCacheConfig(cuInter_kernel<T>, cudaFuncCachePreferShared);
}
//make sure the stack has enough space for non-inlined functions
// cudaDeviceSetLimit(cudaLimitStackSize, 8192);
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start, 0)); // start timing
// kernel launch, important
cuInter_kernel<T> << < GRIDSIZE, BLOCKSIZE, MAX_SHARED, 0 >> > (_table, _posIn, _DataOut, N);
// cuInter_TEST<T> << < GRIDSIZE, BLOCKSIZE, MAX_SHARED, 0 >> > (_table, _posIn, _DataOut);
cudaEventRecord(stop, 0); // stop timing
cudaThreadSynchronize(); // thread synchronization
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop); // get the elapsed time
// std::cout << "Kernel execution time: " << elapsedTime << std::endl;
return elapsedTime;
};
//CPU verification routine for the CUDA interpolation
template< unsigned int T>
float Interpolate_cpu(HBXDef::cuTable<T>* _CuTable,
HBXDef::cuLocation<T>* h_loc,
HBXDef::UserCalPrec* h_result,
unsigned int _num)
{
//call sp's CPU-side interpolation class here
baselag<T>* thelag = new baselag<T>(_CuTable, 0);
clock_t start, end;
double duration;
start = clock();
float rsum = 0.0;
float err = 0.0;
for (unsigned int i = 0; i < _num; i++)
{
float diff = 0;
h_result[i] = thelag->get(h_loc[i].m_Loc);
}
end = clock();
duration = (double)(end - start) / CLOCKS_PER_SEC;
std::cout << "串行版执行时间:" << duration << std::endl;
return duration;
}
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
|
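Both files in the row above time the kernel with the event API: create two events, record one before and one after the launch, synchronize, then read the elapsed milliseconds. Below is a minimal self-contained sketch of that pattern with a trivial placeholder kernel (nothing here is taken from the dataset row); the hipify translation of this pattern is mechanical, since each cudaEvent* call maps to the hipEvent* call of the same name.

```cpp
#include <cuda_runtime.h>
#include <cstdio>

// Placeholder kernel, only here so there is something to time.
__global__ void dummyKernel(float* out, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * i;
}

int main() {
    const int n = 1 << 20;
    float* d_out = nullptr;
    cudaMalloc(&d_out, n * sizeof(float));

    // Same pattern as cuInterpolate above: record start, launch, record stop,
    // synchronize on the stop event, then read the elapsed time in milliseconds.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    dummyKernel<<<(n + 255) / 256, 256>>>(d_out, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);              // waits until the kernel and the stop event finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_out);
    return 0;
}
```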
64cfccebe924df6601f52ef011ba48dcdb790679.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <helper_cuda.h>
#include <GL/glew.h>
GLfloat* D_cordinatePos;
GLfloat* D_colors;
unsigned int* D_indices;
unsigned int* D_indicesNew;
hipStream_t iterateStream, indexStream;
//Calculates the indices for the next iteration of the fractal
extern "C" __global__ void indicesKernel(unsigned int* indices, unsigned int* indicesNew, unsigned int size){
int i = blockDim.x * blockIdx.x+threadIdx.x;
if(i < size * 2 -1){
if((i%2) == 0){
indicesNew[i] = indices[i/2];
}
else{
indicesNew[i] = size + i/2;
}
}
}
//Copies the newest indices to the area of device memory that is used by OpenGL, much faster than MemCpy
extern "C" __global__ void indicesCopyKernel(unsigned int* deviceIndicesPtr,unsigned int* indices, unsigned int* indicesNew, unsigned int size){
int i = blockDim.x * blockIdx.x+threadIdx.x;
if(i < size){
deviceIndicesPtr[i] = indicesNew[i];
}
}
//almost the same as serial code, includes some control flow optimizations
extern "C" __global__ void iterateKernel(GLfloat* deviceVertPtr,GLfloat* deviceColorPtr,unsigned int* indices,const unsigned int size){
int i = blockDim.x * blockIdx.x+threadIdx.x;
if(i < size-1){
GLfloat startX = deviceVertPtr[indices[i]*2];
GLfloat startY = deviceVertPtr[(indices[i]*2)+1];
GLfloat nextX = deviceVertPtr[indices[i+1] * 2];
GLfloat nextY = deviceVertPtr[indices[i+1] * 2 + 1];
GLfloat newStartX;
GLfloat newStartY;
if(startY == nextY){
if( (nextX > startX && (i%2) == 0) || (nextX <= startX && (i%2) == 1)){
//cp 1
newStartX = (startX + nextX)/2;
newStartY = startY + abs(nextX - startX)/2;
}
else{
//cp 2
newStartX = (startX + nextX)/2;
newStartY = startY - abs(nextX - startX)/2;
}
}
else if(startX == nextX){
if((nextY > startY && (i%2) == 0) || (nextY <= startY && (i%2) == 1)){
//cp 3
newStartX = startX - abs(nextY - startY)/2;
newStartY = (nextY + startY)/2;
}
else{
//cp 4
newStartX = startX + abs(nextY - startY)/2;
newStartY = (nextY + startY)/2;
}
}
else{
if(( ( (nextX > startX && nextY > startY) || (nextX < startX && nextY < startY)) && (i%2) == 0)
|| (!((nextX > startX && nextY > startY) || (nextX < startX && nextY < startY)) && (i%2) == 1)){
//cp 5
newStartX = startX;
newStartY = nextY;
}
else{
//cp 6
newStartX = nextX;
newStartY = startY;
}
}
deviceVertPtr[(size+i)*2] = newStartX;
deviceVertPtr[(size+i)*2 +1] = newStartY;
float inc = 2.0/size ;
deviceColorPtr[(size+i)*3] = 2.0-(inc*i);
deviceColorPtr[(size+i)*3+1] = inc*i;
deviceColorPtr[(size+i)*3+2] = 0;
}
}
//kernel launcher function does not hipMemcpy at all.
//this function is called repeatedly.
//color and coordinate data is automatically stored in device memory locations used by OpenGL.
//the VBO index array must be copied to an OpenGL location separately because the operation cannot be done in place easily.
extern "C" int kernelLauncher(GLfloat* deviceVertPtr,GLfloat* deviceColorPtr,unsigned int* deviceIndicesPtr,unsigned int size) {
unsigned int* temp;
temp = D_indices;
D_indices = D_indicesNew;
D_indicesNew = temp;
int blockSize = 1024;
int gridSize = ((size-1)/1024)+1;
hipLaunchKernelGGL(( iterateKernel), dim3(gridSize), dim3(blockSize),0,iterateStream, deviceVertPtr,deviceColorPtr,D_indices,size);
gridSize = ((size*2-1)/1024)+1;
hipLaunchKernelGGL(( indicesKernel), dim3(gridSize), dim3(blockSize),0,indexStream, D_indices,D_indicesNew,size);
hipDeviceSynchronize();
return 0;
}
// kernel init function mallocs and initializes the indices memory location.
// not needed for color and position data because OpenGL buffers and CUDA-registered buffers do this for us.
void kernelInit(unsigned int* H_indices,unsigned int size, unsigned int totalSize) {
hipStreamCreate(&iterateStream);
hipStreamCreate(&indexStream);
hipMalloc(&D_indices, totalSize*sizeof(unsigned int));
hipMalloc(&D_indicesNew, totalSize*sizeof(unsigned int));
hipMemcpy(D_indicesNew, H_indices, size * sizeof(unsigned int), hipMemcpyHostToDevice);
}
//called only once
//copies over the indices array and frees memory.
void kernelClose(unsigned int* deviceIndicesPtr,unsigned int* H_indices,unsigned int size){
int blockSize = 1024;
int gridSize = ((size*2-1)/1024)+1;
hipLaunchKernelGGL(( indicesCopyKernel), dim3(gridSize), dim3(blockSize),0,indexStream, deviceIndicesPtr,D_indices,D_indicesNew,size);
hipDeviceSynchronize();
hipFree(D_indices);
hipFree(D_indicesNew);
}
|
64cfccebe924df6601f52ef011ba48dcdb790679.cu
|
#include <helper_cuda.h>
#include <GL/glew.h>
GLfloat* D_cordinatePos;
GLfloat* D_colors;
unsigned int* D_indices;
unsigned int* D_indicesNew;
cudaStream_t iterateStream, indexStream;
//Calculates the indices for the next iteration of the fractal
extern "C" __global__ void indicesKernel(unsigned int* indices, unsigned int* indicesNew, unsigned int size){
int i = blockDim.x * blockIdx.x+threadIdx.x;
if(i < size * 2 -1){
if((i%2) == 0){
indicesNew[i] = indices[i/2];
}
else{
indicesNew[i] = size + i/2;
}
}
}
//Copies the newest indices to the area of device memory that is used by OpenGL, much faster than MemCpy
extern "C" __global__ void indicesCopyKernel(unsigned int* deviceIndicesPtr,unsigned int* indices, unsigned int* indicesNew, unsigned int size){
int i = blockDim.x * blockIdx.x+threadIdx.x;
if(i < size){
deviceIndicesPtr[i] = indicesNew[i];
}
}
//almost the same as serial code, includes some control flow optimizations
extern "C" __global__ void iterateKernel(GLfloat* deviceVertPtr,GLfloat* deviceColorPtr,unsigned int* indices,const unsigned int size){
int i = blockDim.x * blockIdx.x+threadIdx.x;
if(i < size-1){
GLfloat startX = deviceVertPtr[indices[i]*2];
GLfloat startY = deviceVertPtr[(indices[i]*2)+1];
GLfloat nextX = deviceVertPtr[indices[i+1] * 2];
GLfloat nextY = deviceVertPtr[indices[i+1] * 2 + 1];
GLfloat newStartX;
GLfloat newStartY;
if(startY == nextY){
if( (nextX > startX && (i%2) == 0) || (nextX <= startX && (i%2) == 1)){
//cp 1
newStartX = (startX + nextX)/2;
newStartY = startY + abs(nextX - startX)/2;
}
else{
//cp 2
newStartX = (startX + nextX)/2;
newStartY = startY - abs(nextX - startX)/2;
}
}
else if(startX == nextX){
if((nextY > startY && (i%2) == 0) || (nextY <= startY && (i%2) == 1)){
//cp 3
newStartX = startX - abs(nextY - startY)/2;
newStartY = (nextY + startY)/2;
}
else{
//cp 4
newStartX = startX + abs(nextY - startY)/2;
newStartY = (nextY + startY)/2;
}
}
else{
if(( ( (nextX > startX && nextY > startY) || (nextX < startX && nextY < startY)) && (i%2) == 0)
|| (!((nextX > startX && nextY > startY) || (nextX < startX && nextY < startY)) && (i%2) == 1)){
//cp 5
newStartX = startX;
newStartY = nextY;
}
else{
//cp 6
newStartX = nextX;
newStartY = startY;
}
}
deviceVertPtr[(size+i)*2] = newStartX;
deviceVertPtr[(size+i)*2 +1] = newStartY;
float inc = 2.0/size ;
deviceColorPtr[(size+i)*3] = 2.0-(inc*i);
deviceColorPtr[(size+i)*3+1] = inc*i;
deviceColorPtr[(size+i)*3+2] = 0;
}
}
//kernel launcher function does not cudaMemcpy at all.
//this function is called repeatedly.
//color and coordinate data is automatically stored in device memory locations used by OpenGL.
//the VBO index array must be copied to an OpenGL location separately because the operation cannot be done in place easily.
extern "C" int kernelLauncher(GLfloat* deviceVertPtr,GLfloat* deviceColorPtr,unsigned int* deviceIndicesPtr,unsigned int size) {
unsigned int* temp;
temp = D_indices;
D_indices = D_indicesNew;
D_indicesNew = temp;
int blockSize = 1024;
int gridSize = ((size-1)/1024)+1;
iterateKernel<<<gridSize, blockSize,0,iterateStream>>>(deviceVertPtr,deviceColorPtr,D_indices,size);
gridSize = ((size*2-1)/1024)+1;
indicesKernel<<<gridSize, blockSize,0,indexStream>>>(D_indices,D_indicesNew,size);
cudaDeviceSynchronize();
return 0;
}
// kernel init function mallocs and initializes the indices memory location.
// not needed for color and position data because OpenGL buffers and CUDA-registered buffers do this for us.
void kernelInit(unsigned int* H_indices,unsigned int size, unsigned int totalSize) {
cudaStreamCreate(&iterateStream);
cudaStreamCreate(&indexStream);
cudaMalloc(&D_indices, totalSize*sizeof(unsigned int));
cudaMalloc(&D_indicesNew, totalSize*sizeof(unsigned int));
cudaMemcpy(D_indicesNew, H_indices, size * sizeof(unsigned int), cudaMemcpyHostToDevice);
}
//called only once
//copies over the indices array and frees memory.
void kernelClose(unsigned int* deviceIndicesPtr,unsigned int* H_indices,unsigned int size){
int blockSize = 1024;
int gridSize = ((size*2-1)/1024)+1;
indicesCopyKernel<<<gridSize, blockSize,0,indexStream>>>(deviceIndicesPtr,D_indices,D_indicesNew,size);
cudaDeviceSynchronize();
cudaFree(D_indices);
cudaFree(D_indicesNew);
}
|
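The main textual change hipify makes in the pair above is the launch syntax: the CUDA triple-chevron launch becomes a hipLaunchKernelGGL call with explicit dim3 grid/block arguments, a shared-memory byte count, and a stream. A minimal sketch of that mapping follows; the kernel, names, and sizes are illustrative only and are not taken from the dataset row.

```cpp
#include <hip/hip_runtime.h>

// Trivial illustrative kernel: scale every element of a device array.
__global__ void scaleKernel(float* data, float s, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

void launchScale(float* d_data, float s, int n, hipStream_t stream) {
    int blockSize = 256;
    int gridSize  = (n + blockSize - 1) / blockSize;
    // CUDA source:   scaleKernel<<<gridSize, blockSize, 0, stream>>>(d_data, s, n);
    // hipify output: the same launch expressed through the macro, with explicit
    // dim3 grid/block, shared-memory bytes, and stream, then the kernel arguments.
    hipLaunchKernelGGL(scaleKernel, dim3(gridSize), dim3(blockSize), 0, stream,
                       d_data, s, n);
}
```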
5ef5b47362839e904510196c319ffaace8ebbde2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <sys/time.h>
#include "../lib/bed.h"
#include "../lib/set_intersect.h"
#include "radixsort.h"
//#include "gpu.hpp"
#include "random.hpp"
#include "../lib/timer.h"
//#include "order_kernel.cu"
#include "set_intersect_cuda.h"
int main(int argc, char *argv[]) {
hipFree(NULL);
if (argc < 5) {
fprintf(stderr, "usage: order <u> <a> <b> <N>\n");
return 1;
}
int chrom_num = 24;
/***********************REPLACE WITH INPUT FILE************************/
char *chrom_names[] = {
"chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8",
"chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16",
"chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY"
};
/**********************************************************************/
struct chr_list *U, *A, *B;
char *U_file = argv[1], *A_file = argv[2], *B_file = argv[3];
int reps = atoi(argv[4]);
if ( ( chr_list_from_bed_file(&U, chrom_names, chrom_num, U_file) == 1) ||
( chr_list_from_bed_file(&A, chrom_names, chrom_num, A_file) == 1) ||
( chr_list_from_bed_file(&B, chrom_names, chrom_num, B_file) == 1) ) {
fprintf(stderr, "Error parsing bed files.\n");
return 1;
}
unsigned int max = add_offsets(U, chrom_num);
trim(U, A, chrom_num);
trim(U, B, chrom_num);
int A_size, B_size, U_size;
struct bed_line *U_array, *A_array, *B_array;
U_size = chr_array_from_list(U, &U_array, chrom_num);
A_size = chr_array_from_list(A, &A_array, chrom_num);
B_size = chr_array_from_list(B, &B_array, chrom_num);
unsigned int *A_key_h =
(unsigned int *) malloc( (A_size) * sizeof(unsigned int));
unsigned int *A_val_h =
(unsigned int *) malloc( (A_size) * sizeof(unsigned int));
unsigned int *B_key_h =
(unsigned int *) malloc( (B_size) * sizeof(unsigned int));
unsigned int *B_val_h =
(unsigned int *) malloc( (B_size) * sizeof(unsigned int));
/*
* In CUDA we can sort key value pairs,
* the key can be the offset, and the value can be the length
*/
set_start_len( U_array, U_size,
A_array, A_key_h, A_val_h, A_size );
set_start_len( U_array, U_size,
B_array, B_key_h, B_val_h, B_size );
// Move A and B to the device
unsigned int *A_key_d, *A_val_d, *B_key_d, *B_val_d;
hipMalloc((void **)&A_key_d, (A_size)*sizeof(unsigned int));
hipMalloc((void **)&A_val_d, (A_size)*sizeof(unsigned int));
hipMalloc((void **)&B_key_d, (B_size)*sizeof(unsigned int));
hipMalloc((void **)&B_val_d, (B_size)*sizeof(unsigned int));
start();
hipMemcpy(A_key_d, A_key_h, (A_size) * sizeof(unsigned int),
hipMemcpyHostToDevice);
hipMemcpy(A_val_d, A_val_h, (A_size) * sizeof(unsigned int),
hipMemcpyHostToDevice);
hipMemcpy(B_key_d, B_key_h, (B_size) * sizeof(unsigned int),
hipMemcpyHostToDevice);
hipMemcpy(B_val_d, B_val_h, (B_size) * sizeof(unsigned int),
hipMemcpyHostToDevice);
stop();
unsigned long memup_time = report();
int block_size = 256;
dim3 dimBlock(block_size);
// R will hold the results of the intersection, for each interval A[i],
// R[i] will be the number of intervals in B that A[i] intersects,
unsigned int *R_d;
hipMalloc((void **)&R_d, (A_size)*sizeof(unsigned int));
// *_key_d holds the start position, and *_val_d holds the length,
// the end position is *_key_d + *_val_d
//
// Each thread will search |reps| items in A, we will keep the blocksize
// fixed at 256, but we will need to adjust the grid size
dim3 dimGridSearch( ( A_size + 1) / (block_size * reps));
hipError_t err;
int z;
for (z = 0; z < 1000; z++) {
start();
hipLaunchKernelGGL(( intersection_b_search) , dim3(dimGridSearch),
dim3( dimBlock) , 0, 0, A_key_d, A_val_d, A_size,
B_key_d, B_val_d, B_size,
R_d, reps);
hipDeviceSynchronize();
stop();
printf("GM\t%ld\t", report());
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "GM search: %s.\n", hipGetErrorString( err) );
parallel_sum( R_d, block_size, A_size, 100 );
unsigned int x;
hipMemcpy(&x, R_d, 1 * sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("%ld\n", x);
}
start();
// Sort A
nvRadixSort::RadixSort radixsortA(A_size, false);
radixsortA.sort((unsigned int*)A_key_d, (unsigned int*)A_val_d,
A_size, 32);
// Sort B
nvRadixSort::RadixSort radixsortB(B_size, false);
radixsortB.sort((unsigned int*)B_key_d, (unsigned int*)B_val_d,
B_size, 32);
hipDeviceSynchronize();
stop();
for (z = 0; z < 1000; z++) {
start();
hipLaunchKernelGGL(( intersection_b_search_sm) , dim3(dimGridSearch),
dim3( dimBlock),
2000 * sizeof(int), 0,
A_key_d, A_val_d, A_size,
B_key_d, B_val_d, B_size,
R_d, reps);
hipDeviceSynchronize();
stop();
printf("SM\t%ld\t", report());
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "SM search: %s.\n", hipGetErrorString( err) );
parallel_sum( R_d, block_size, A_size, 100 );
unsigned int x;
hipMemcpy(&x, R_d, 1 * sizeof(unsigned int), hipMemcpyDeviceToHost);
printf("%ld\n", x);
}
// Move a big block of B then search a range of A
/*
dim3 dimGridSearch_SM2( (B_size + 1) / 2000 );
intersection_b_search_sm_2 <<<dimGridSearch,
dimBlock,
2000 * sizeof(int)>>> (
A_key_d, A_val_d, A_size,
B_key_d, B_val_d, B_size,
R_d,
2000);
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "SM 2: %s.\n", hipGetErrorString( err) );
*/
/*
unsigned int *R_h = (unsigned int *) malloc(
(A_size) * sizeof(unsigned int));
hipMemcpy(R_h, R_d, A_size * sizeof(unsigned int),
hipMemcpyDeviceToHost);
int n = 1024;
start();
parallel_sum( R_d, block_size, A_size, n );
stop();
unsigned long sum_time = report();
int x;
start();
hipMemcpy(&x, R_d, 1 * sizeof(int), hipMemcpyDeviceToHost);
stop();
unsigned long memdown_time = report();
printf("O: %d\n", x);
*/
/*
printf("size:%d,%d\tup:%ld\tsort:%ld\tsearch:%ld\tsum:%ld\tdown:%ld"
"\tcomp:%ld\ttotal:%ld\n",
A_size, B_size,
memup_time, sort_time, search_time, sum_time, memdown_time,
sort_time + search_time + sum_time,
memup_time + sort_time + search_time + sum_time + memdown_time);
*/
hipFree(A_key_d);
hipFree(B_key_d);
return 0;
}
|
5ef5b47362839e904510196c319ffaace8ebbde2.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cutil.h>
#include <sys/time.h>
#include "../lib/bed.h"
#include "../lib/set_intersect.h"
#include "radixsort.h"
//#include "gpu.hpp"
#include "random.hpp"
#include "../lib/timer.h"
//#include "order_kernel.cu"
#include "set_intersect_cuda.h"
int main(int argc, char *argv[]) {
cudaFree(NULL);
if (argc < 5) {
fprintf(stderr, "usage: order <u> <a> <b> <N>\n");
return 1;
}
int chrom_num = 24;
/***********************REPLACE WITH INPUT FILE************************/
char *chrom_names[] = {
"chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8",
"chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16",
"chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY"
};
/**********************************************************************/
struct chr_list *U, *A, *B;
char *U_file = argv[1], *A_file = argv[2], *B_file = argv[3];
int reps = atoi(argv[4]);
if ( ( chr_list_from_bed_file(&U, chrom_names, chrom_num, U_file) == 1) ||
( chr_list_from_bed_file(&A, chrom_names, chrom_num, A_file) == 1) ||
( chr_list_from_bed_file(&B, chrom_names, chrom_num, B_file) == 1) ) {
fprintf(stderr, "Error parsing bed files.\n");
return 1;
}
unsigned int max = add_offsets(U, chrom_num);
trim(U, A, chrom_num);
trim(U, B, chrom_num);
int A_size, B_size, U_size;
struct bed_line *U_array, *A_array, *B_array;
U_size = chr_array_from_list(U, &U_array, chrom_num);
A_size = chr_array_from_list(A, &A_array, chrom_num);
B_size = chr_array_from_list(B, &B_array, chrom_num);
unsigned int *A_key_h =
(unsigned int *) malloc( (A_size) * sizeof(unsigned int));
unsigned int *A_val_h =
(unsigned int *) malloc( (A_size) * sizeof(unsigned int));
unsigned int *B_key_h =
(unsigned int *) malloc( (B_size) * sizeof(unsigned int));
unsigned int *B_val_h =
(unsigned int *) malloc( (B_size) * sizeof(unsigned int));
/*
* In CUDA we can sort key value pairs,
* the key can be the offset, and the value can be the length
*/
set_start_len( U_array, U_size,
A_array, A_key_h, A_val_h, A_size );
set_start_len( U_array, U_size,
B_array, B_key_h, B_val_h, B_size );
// Move A and B to the device
unsigned int *A_key_d, *A_val_d, *B_key_d, *B_val_d;
cudaMalloc((void **)&A_key_d, (A_size)*sizeof(unsigned int));
cudaMalloc((void **)&A_val_d, (A_size)*sizeof(unsigned int));
cudaMalloc((void **)&B_key_d, (B_size)*sizeof(unsigned int));
cudaMalloc((void **)&B_val_d, (B_size)*sizeof(unsigned int));
start();
cudaMemcpy(A_key_d, A_key_h, (A_size) * sizeof(unsigned int),
cudaMemcpyHostToDevice);
cudaMemcpy(A_val_d, A_val_h, (A_size) * sizeof(unsigned int),
cudaMemcpyHostToDevice);
cudaMemcpy(B_key_d, B_key_h, (B_size) * sizeof(unsigned int),
cudaMemcpyHostToDevice);
cudaMemcpy(B_val_d, B_val_h, (B_size) * sizeof(unsigned int),
cudaMemcpyHostToDevice);
stop();
unsigned long memup_time = report();
int block_size = 256;
dim3 dimBlock(block_size);
// R will hold the results of the intersection, for each interval A[i],
// R[i] will be the number of intervals in B that A[i] intersects,
unsigned int *R_d;
cudaMalloc((void **)&R_d, (A_size)*sizeof(unsigned int));
// *_key_d holds the start position, and *_val_d holds the length,
// the end position is *_key_d + *_val_d
//
// Each thread will search |reps| items in A, we will keep the blocksize
// fixed at 256, but we will need to adjust the grid size
dim3 dimGridSearch( ( A_size + 1) / (block_size * reps));
cudaError_t err;
int z;
for (z = 0; z < 1000; z++) {
start();
intersection_b_search <<<dimGridSearch,
dimBlock >>> ( A_key_d, A_val_d, A_size,
B_key_d, B_val_d, B_size,
R_d, reps);
cudaThreadSynchronize();
stop();
printf("GM\t%ld\t", report());
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "GM search: %s.\n", cudaGetErrorString( err) );
parallel_sum( R_d, block_size, A_size, 100 );
unsigned int x;
cudaMemcpy(&x, R_d, 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("%ld\n", x);
}
start();
// Sort A
nvRadixSort::RadixSort radixsortA(A_size, false);
radixsortA.sort((unsigned int*)A_key_d, (unsigned int*)A_val_d,
A_size, 32);
// Sort B
nvRadixSort::RadixSort radixsortB(B_size, false);
radixsortB.sort((unsigned int*)B_key_d, (unsigned int*)B_val_d,
B_size, 32);
cudaThreadSynchronize();
stop();
for (z = 0; z < 1000; z++) {
start();
intersection_b_search_sm <<<dimGridSearch,
dimBlock,
2000 * sizeof(int)>>> (
A_key_d, A_val_d, A_size,
B_key_d, B_val_d, B_size,
R_d, reps);
cudaThreadSynchronize();
stop();
printf("SM\t%ld\t", report());
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "SM search: %s.\n", cudaGetErrorString( err) );
parallel_sum( R_d, block_size, A_size, 100 );
unsigned int x;
cudaMemcpy(&x, R_d, 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("%ld\n", x);
}
// Move a big block of B then search a range of A
/*
dim3 dimGridSearch_SM2( (B_size + 1) / 2000 );
intersection_b_search_sm_2 <<<dimGridSearch,
dimBlock,
2000 * sizeof(int)>>> (
A_key_d, A_val_d, A_size,
B_key_d, B_val_d, B_size,
R_d,
2000);
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "SM 2: %s.\n", cudaGetErrorString( err) );
*/
/*
unsigned int *R_h = (unsigned int *) malloc(
(A_size) * sizeof(unsigned int));
cudaMemcpy(R_h, R_d, A_size * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
int n = 1024;
start();
parallel_sum( R_d, block_size, A_size, n );
stop();
unsigned long sum_time = report();
int x;
start();
cudaMemcpy(&x, R_d, 1 * sizeof(int), cudaMemcpyDeviceToHost);
stop();
unsigned long memdown_time = report();
printf("O: %d\n", x);
*/
/*
printf("size:%d,%d\tup:%ld\tsort:%ld\tsearch:%ld\tsum:%ld\tdown:%ld"
"\tcomp:%ld\ttotal:%ld\n",
A_size, B_size,
memup_time, sort_time, search_time, sum_time, memdown_time,
sort_time + search_time + sum_time,
memup_time + sort_time + search_time + sum_time + memdown_time);
*/
cudaFree(A_key_d);
cudaFree(B_key_d);
return 0;
}
|
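The benchmark above sizes its grid by integer division and checks hipGetLastError/cudaGetLastError after each launch. Below is a small sketch of that launch-and-check pattern with ceiling division, assuming a placeholder kernel in which each thread processes `reps` elements; the real intersection_b_search kernel is declared in set_intersect_cuda.h and not shown in this row, so its exact thread-to-work layout is an assumption here.

```cpp
#include <cuda_runtime.h>
#include <cstdio>

// Placeholder kernel: each thread handles `reps` consecutive elements
// (the actual kernel's layout is not shown in the dataset row).
__global__ void countKernel(const unsigned int* in, unsigned int* out,
                            unsigned int n, unsigned int reps) {
    unsigned int base = (blockDim.x * blockIdx.x + threadIdx.x) * reps;
    for (unsigned int k = 0; k < reps; ++k) {
        unsigned int i = base + k;
        if (i < n) out[i] = in[i] + 1u;
    }
}

void launchCount(const unsigned int* d_in, unsigned int* d_out,
                 unsigned int n, unsigned int reps) {
    int blockSize = 256;
    // Ceiling division so every element is covered even when n is not a
    // multiple of blockSize * reps.
    int gridSize = (n + blockSize * reps - 1) / (blockSize * reps);
    countKernel<<<gridSize, blockSize>>>(d_in, d_out, n, reps);
    cudaError_t err = cudaGetLastError();      // catches launch-configuration errors
    if (err != cudaSuccess)
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();             // catches errors raised during execution
    if (err != cudaSuccess)
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
}
```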
3c8d88ee7bfc1bb7dd5421ecc31db9e8fba8a442.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <iostream>
#include <vector>
#include <utility>
#include "caffe/util/benchmark.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/rnn_base_layer.hpp"
#include "caffe/layers/relu_layer.hpp"
#include "caffe/layers/sigmoid_layer.hpp"
#include "caffe/filler.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
inline Dtype sigmoid(Dtype x) {
return 1. / (1. + exp(-x));
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::Merge_gpu(Node<Dtype>* left, Node<Dtype>* right, Node<Dtype>* top,
Dtype* layer_top, Dtype* layer_label, bool correct_tree, bool test){
//CPUTimer time;
//time.Start();
//M_: number of samples  K_: input feature dimension  N_: output feature dimension
int left_id = left->is_leaf ? 0: 1;
int right_id = right->is_leaf ? 0: 1;
const Dtype* left_feature = left->bottom[left_id]->gpu_data();
const Dtype* right_feature = right->bottom[right_id]->gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bias = this->blobs_[1]->gpu_data();
const Dtype* w_score = this->blobs_[2]->gpu_data();
M_ = 1;
N_ = channels_;
K_ = 2 * N_;
Blob<Dtype>* joint = new Blob<Dtype>(1, K_, 1, 1);
Dtype *joint_feature = joint->mutable_gpu_data();
caffe_copy(N_, left_feature, joint_feature);
caffe_copy(N_, right_feature, joint_feature + N_);
//compute the merged bbox [c1, r1, c2, r2]
for(int i = 0; i < 2; i++)
top->rect.push_back(std::min(left->rect[i], right->rect[i]));
for(int i = 2; i < 4; i++)
top->rect.push_back(std::max(left->rect[i], right->rect[i]));
//cal label
if(correct_tree && !test){
top->map_id = map_id_;
layer_label[map_id_] = label(gt, top->rect);
}
//allocate space for the feature
for(int i = 0; i < 2; i++){
top->bottom.push_back(new Blob<Dtype>(1, N_, 1, 1));
}
//M, N, K. A, B, C;
//A = M * K, B = K * N, C = M * N
//weight: N * K
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
joint_feature, weight, (Dtype)0., top->bottom[0]->mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
bias_multiplier_.gpu_data(), bias, (Dtype)1., top->bottom[0]->mutable_gpu_data());
delete joint;
// relu
relu_bottom_vec_.clear();
relu_top_vec_.clear();
relu_bottom_vec_.push_back(top->bottom[0]);
relu_top_vec_.push_back(top->bottom[1]);
relu_layer_->Forward(relu_bottom_vec_, relu_top_vec_);
//sigmoid
// sigmoid_bottom_vec_.clear();
// sigmoid_top_vec_.clear();
// sigmoid_bottom_vec_.push_back(top->bottom[0]);
// sigmoid_top_vec_.push_back(top->bottom[1]);
// sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
//copy to the top data
if(correct_tree && !test) {
layer_top += map_id_ * channels_;
caffe_copy(channels_, top->bottom[1]->mutable_gpu_data(), layer_top);
map_id_ ++;
}
//compute merge score
//w_score: 1 * N
M_ = 1;
K_ = channels_;
N_ = 1;
Blob<Dtype> score;
score.Reshape(1,1,1,1);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
top->bottom[1]->gpu_data(), w_score, (Dtype)0., score.mutable_gpu_data());
//sigmoid layer
top->merge_score = score.cpu_data()[0];
//link up the left and right children
top->left = left;
top->right = right;
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::Backward_gpu(Node<Dtype> *left, Node<Dtype> *right, Node<Dtype> *top, const Dtype* layer_top, bool correct_tree){
//CPUTimer time;
//time.Start();
if(top->is_leaf && !correct_tree)
return;
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* w_score = this->blobs_[2]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
Dtype* w_score_diff = this->blobs_[2]->mutable_gpu_diff();
//gradient of the softmax with respect to the feature
if(correct_tree){
//layer_top += top->map_id * channels_;
//__asm__("int $3");
if(top->is_leaf) {
//caffe_gpu_add(channels_, layer_top, top->bottom[0]->gpu_diff(), top->bottom[0]->mutable_gpu_diff());
return;
}
/*
else
caffe_gpu_add(channels_, layer_top, top->bottom[1]->gpu_diff(), top->bottom[1]->mutable_gpu_diff());
*/
}
// gradient of merge_score with respect to the feature
M_ = 1;
K_ = channels_;
N_ = 1;
//loss = (largest_tree_score - correct_tree_score) / batch_size
//Dtype score = top->merge_score;
Dtype value = -1.0/batch_size;
//Dtype tmp = 1.0;
/*tmp = tmp * exp(tree_loss[selected_num]);
tmp = tmp /(1 + tmp);*/
//tmp = 100 * tmp / (seg_size - 1);
/* value = value * tmp;
if(rand() % 20000 == 0)
LOG(INFO) << tmp;*/
if(!correct_tree) {
//__asm__("int $3");
value *= -1;
}
//value = value + 0.01 * score / batch_size;
Blob<Dtype> Diff;
Diff.Reshape(1, 1, 1, 1);
Dtype* diff = Diff.mutable_cpu_data();
diff[0] = value;
//may be accelerated by other function
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
Diff.gpu_data(), top->bottom[1]->gpu_data(), (Dtype)1., w_score_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
Diff.gpu_data(), w_score, (Dtype)1., top->bottom[1]->mutable_gpu_diff());
//relu
relu_bottom_vec_.clear();
relu_top_vec_.clear();
relu_bottom_vec_.push_back(top->bottom[0]);
relu_top_vec_.push_back(top->bottom[1]);
vector<bool> propaget_down(1, true);
relu_layer_->Backward(relu_top_vec_, propaget_down, relu_bottom_vec_);
// sigmoid_bottom_vec_.clear();
// sigmoid_top_vec_.clear();
// sigmoid_bottom_vec_.push_back(top->bottom[0]);
// sigmoid_top_vec_.push_back(top->bottom[1]);
// vector<bool> propaget_down(1, true);
// sigmoid_layer_->Backward(sigmoid_top_vec_, propaget_down, sigmoid_bottom_vec_);
//now backpropagate to the two child nodes
//be careful with the others
int left_id = left->is_leaf ? 0: 1;
int right_id = right->is_leaf ? 0: 1;
const Dtype* left_feature = left->bottom[left_id]->gpu_data();
const Dtype* right_feature = right->bottom[right_id]->gpu_data();
Blob<Dtype>* joint = new Blob<Dtype>(1, 2*channels_, 1, 1);
Dtype *joint_feature = joint->mutable_gpu_data();
//Dtype *joint_feature = new Dtype[2*channels_];
caffe_copy(channels_, left_feature, joint_feature);
caffe_copy(channels_, right_feature, joint_feature + channels_);
M_ = 1;
N_ = channels_;
K_ = 2 * N_;
//LOG(INFO)<<"ENTER .................";
//compute the partial derivatives with respect to weight and bias
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top->bottom[0]->gpu_diff(), joint_feature, (Dtype)1., weight_diff);
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top->bottom[0]->gpu_diff(),
bias_multiplier_.gpu_data(), (Dtype)1., bias_diff);
//compute the partial derivative with respect to delta
//Dtype* delta = new Dtype[K_];
Dtype* delta = joint->mutable_gpu_diff();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top->bottom[0]->gpu_diff(), weight, (Dtype)0., delta);
caffe_gpu_add(N_, delta, left->bottom[left_id]->gpu_diff(), left->bottom[left_id]->mutable_gpu_diff());
caffe_gpu_add(N_, delta + N_, right->bottom[right_id]->gpu_diff(), right->bottom[right_id]->mutable_gpu_diff());
delete joint;
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::get_score_gpu(const Blob<Dtype>* input, Blob<Dtype>* merge_score) {
//input blobs
CPUTimer time;
time.Start();
const Dtype* weight = this->blobs_[0]->gpu_data();
//__asm__("int $3");
const Dtype* bias = this->blobs_[1]->gpu_data();
const Dtype* w_score = this->blobs_[2]->gpu_data();
M_ = input->num();
N_ = channels_;
K_ = 2 * N_;
Blob<Dtype>* out = new Blob<Dtype>(M_, N_, 1, 1);
const Dtype* joint_feature = input->gpu_data();
Dtype* out_feature = out->mutable_gpu_data();
//M, N, K. A, B, C;
//A = M * K, B = K * N, C = M * N
//weight: N * K
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
joint_feature, weight, (Dtype)0., out_feature);
//notice the matrix dimension
Blob<Dtype> matrix_bias_multiplier_;
matrix_bias_multiplier_.Reshape(1, 1, 1, M_);
caffe_set(matrix_bias_multiplier_.count(), Dtype(1), matrix_bias_multiplier_.mutable_cpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
matrix_bias_multiplier_.gpu_data(), bias, (Dtype)1., out_feature);
relu_bottom_vec_.clear();
relu_top_vec_.clear();
relu_bottom_vec_.push_back(out);
relu_top_vec_.push_back(out);
relu_layer_->Forward(relu_bottom_vec_, relu_top_vec_);
// sigmoid_bottom_vec_.clear();
// sigmoid_top_vec_.clear();
// sigmoid_bottom_vec_.push_back(out);
// sigmoid_top_vec_.push_back(out);
// sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
M_ = input->num();
K_ = channels_;
N_ = 1;
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
out_feature, w_score, (Dtype)0., merge_score->mutable_gpu_data());
delete out;
time.Stop();
//LOG(INFO)<<"get score per cost "<<time.MilliSeconds()<<".ms";
return ;
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::build_tree_gpu(vector<Node<Dtype>*> &tree, vector<int> seg_class, vector<bool> adj,
vector<Pair<Dtype> > pair, Dtype* layer_top, Dtype* layer_label, bool correct_tree, bool test) {
CPUTimer time;
vector<Pair<Dtype> > pair_1;
pair_1.clear();
//time.Start();
CHECK_EQ(seg_class.size(), seg_size);
//__asm__("int $3");
//LOG(INFO)<<"loss per error is "<<loss_per_error_;
for(int i = 0; i < pair.size(); i++) {
int l_id = pair[i].l, r_id = pair[i].r;
if(!correct_tree){
pair[i].score += loss_per_error_ * (1 - Is_oneclass(l_id, r_id, seg_class));
// LOG(INFO)<<i<<" "<<pair[i].score;
}
}
for(int k = seg_size; k < total_num; ++k) {
// time.Start();
sort(pair.begin(), pair.end());
int max_id = 0;
if(correct_tree && !test){
for(int i = 0; i < pair.size(); i++){
if(Is_oneclass(pair[i].l, pair[i].r, seg_class)){
max_id = i;
break;
}
}
}
if(correct_tree) loss -= pair[max_id].score;
else loss += pair[max_id].score;
int left_id = pair[max_id].l, right_id = pair[max_id].r;
//find the highest-scoring pair
tree.push_back(new Node<Dtype>());
//merge the node
if(correct_tree)
Merge_gpu(tree[left_id], tree[right_id], tree[tree.size()-1], layer_top, layer_label, correct_tree, test);
else{
//To reuse the adj space and the other buffers
//we follow the order: first the correct tree, then the highest-scoring tree;
//be careful with l_id and r_id.
int l_id = left_id >= seg_size && left_id < total_num ? left_id + seg_size - 1 : left_id;
int r_id = right_id >= seg_size && right_id < total_num ? right_id + seg_size - 1 : right_id;
Merge_gpu(tree[l_id], tree[r_id], tree[tree.size()-1], layer_top, layer_label, correct_tree, test);
}
//update current class
if(Is_oneclass(left_id, right_id, seg_class))
seg_class.push_back(seg_class[left_id]);
else
seg_class.push_back(-1);
/*if(rand() % 5000 == 0 && !test){
if(seg_class[seg_class.size()-1] == -1 && !correct_tree)
LOG(INFO)<<"Wrong merge "<<pair[max_id].score<<" loss per error "<<loss_per_error_;
else
LOG(INFO)<<"Correct merge "<<pair[max_id].score<<" loss per error "<<loss_per_error_;
}*/
//remove pairs that no longer exist
for(int i = 0; i < pair.size(); i++){
if(pair[i].l == left_id || pair[i].l == right_id
|| pair[i].r == left_id || pair[i].r == right_id)
continue;
pair_1.push_back(pair[i]);
}
//update adj matrix
vector<int> tmp;
for(int i = 0; i < total_num; i++){
if(adj[go(left_id, i)] || adj[go(right_id, i)])
tmp.push_back(i);
}
for(int i = 0; i < tmp.size(); i++){
adj[go(k,tmp[i])] = adj[go(tmp[i],k)] = true;
}
for(int i = 0; i < total_num; i++){
adj[go(left_id,i)] = adj[go(i,left_id)] = false;
adj[go(right_id,i)] = adj[go(i,right_id)] = false;
}
//update merge score
time.Start();
int cnt = 0;
for(int i = 0; i < total_num ; i++){
if(adj[go(k,i)])
cnt ++;
}
//for 2 direction
cnt *= 2;
if(cnt == 0) continue;
Blob<Dtype> input, merge_score;
//__asm__("int $3");
input.Reshape(cnt, 2*channels_, 1, 1);
merge_score.Reshape(cnt, 1, 1, 1);
Dtype* input_feature = input.mutable_gpu_data();
for(int i = 0; i < total_num; ++i) {
if(adj[go(k,i)]){
int id = i;
if(!correct_tree)
id = i >= seg_size && i < total_num ? i + seg_size - 1 : i;
int l_id = tree[tree.size()-1]->is_leaf ? 0: 1;
int r_id = tree[id]->is_leaf ? 0: 1;
const Dtype* feat1 = tree[tree.size()-1]->bottom[l_id]->gpu_data();
const Dtype* feat2 = tree[id]->bottom[r_id]->gpu_data();
caffe_copy(channels_, feat1, input_feature);
caffe_copy(channels_, feat2, input_feature + channels_);
input_feature += input.offset(1);
caffe_copy(channels_, feat2, input_feature);
caffe_copy(channels_, feat1, input_feature + channels_);
input_feature += input.offset(1);
}
}
//__asm__("int $3");
get_score_gpu(&input, &merge_score);
const Dtype* score = merge_score.cpu_data();
int id = 0;
for(int i = 0; i < total_num; ++i) {
if(adj[go(k,i)]){
Dtype score_1 = score[id++], score_2 = score[id++];
//time.Stop();
//LOG(INFO)<<"get score cost "<<time.MilliSeconds()<<"ms.";
if(!correct_tree){
score_1 += loss_per_error_ * (1 - Is_oneclass(k, i, seg_class));
score_2 += loss_per_error_ * (1 - Is_oneclass(i, k, seg_class));
}
pair_1.push_back(Pair<Dtype>(k, i, score_1));
pair_1.push_back(Pair<Dtype>(i, k, score_2));
}
}
time.Stop();
//LOG(INFO)<<"update get_score is "<<time.MilliSeconds()<<"ms.";
pair.clear();
pair = pair_1;
pair_1.clear();
}
//__asm__("int $3");
/*
if(!correct_tree) {
for(int k = 0; k < seg_class.size(); ++k){
LOG(INFO)<<seg_class[k];
}
__asm__("int $3");
}*/
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
//bottom[0]->[roi_num, dim, 1, 1]---->feature
//bottom[1]->[batch_size, 1, 1]----->id for each image
//bottom[2]->[roi_num, 5, 1, 1]------>roi_num coordinate
//the blob produced by merging
//top[0]->[roi, dim, 1, 1]
//top[1]->gt
//LOG(INFO)<<"Enter into the RNN layer............";
int roi_num = bottom[0]->num();
int dim = bottom[0]->channels();
int out_num = 2 * roi_num - batch_size;
//LOG(INFO)<<"batch_size is "<<batch_size;
top[0]->Reshape(out_num, dim, 1, 1);
top[1]->Reshape(out_num, 1, 1, 1);
const Dtype* feature = bottom[0]->gpu_data();
const Dtype* id = bottom[1]->cpu_data();
const Dtype* roi = bottom[2]->cpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
//note for cpu
Dtype* top_label = top[1]->mutable_cpu_data();
//the id is set to 0
map_id_ = 0;
//tree loss is set to 0
loss = (Dtype)0.;
int hit_1 = 0, hit_2 = 0, hit_3 = 0, hit_4 = 0, total_gt = 0;
// __asm__("int $3");
Dtype total_loss = 0;
tree_loss.clear();
for(int batch_id = 0; batch_id < batch_size; batch_id ++){
//get segmentation size
int index = id[batch_id];
//read gt
gt = ReadFileToVector(gt_folder + lines_[index] + ".txt");
//read adj
vec_adj = ReadFileToVector(adj_folder + lines_[index] + ".txt");
//read class
seg_class = ReadFileToVector(class_folder + lines_[index] + ".txt");
seg_size = seg_class.size();
//__asm__("int $3");
vector<Node<Dtype>*> tree;
tree.clear();
//CPUTimer timer;
//timer.Start();
total_num = 2 * seg_size - 1;
DLOG(INFO)<<"Forward seg_size is "<<seg_size;
for(int i = 0; i < seg_size; i++){
tree.push_back(new Node<Dtype>());
tree[i]->bottom.push_back(new Blob<Dtype>(1, dim, 1, 1));
//LOG(INFO)<<"map_id_ should be -1"<<tree[i]->map_id;
//LOG(INFO)<<"is_leaf should be false"<<tree[i]->is_leaf;
tree[i]->map_id = map_id_;
tree[i]->is_leaf = true;
Dtype* src = tree[i]->bottom[0]->mutable_gpu_data();
caffe_copy(dim, feature + i * dim, src);
caffe_copy(dim, feature + i * dim, top_data + map_id_ * channels_);
for(int j = 1; j <= 4; ++j) tree[i]->rect.push_back(roi[j]);
top_label[map_id_] = label(gt, tree[i]->rect);
roi += bottom[2]->offset(1);
map_id_ ++;
}
vector<bool> adj(total_num * total_num, false);
int cnt = 0;
// read adj matrix
for(int i = 0; i < seg_size; i++){
for(int j = 0; j < seg_size; j++){
int offset = i * seg_size + j;
if(vec_adj[offset] == 1) {
adj[go(i,j)] = true;
cnt ++;
}
}
}
vector<Pair<Dtype> > pair;
pair.clear();
//__asm__("int $3");
CPUTimer time;
time.Start();
Blob<Dtype>* input = new Blob<Dtype>(cnt, 2 * channels_, 1, 1);
Blob<Dtype>* merge_score = new Blob<Dtype>(cnt, 1, 1, 1);
Dtype* input_feature = input->mutable_gpu_data();
for(int i = 0; i < seg_size; ++i){
for(int j = 0; j < seg_size; ++j){
const Dtype* feat1 = feature + i * dim;
const Dtype* feat2 = feature + j * dim;
if(adj[go(i,j)]){
caffe_copy(channels_, feat1, input_feature);
caffe_copy(channels_, feat2, input_feature + channels_);
//__asm__("int $3");
input_feature += 2 * channels_;
pair.push_back(Pair<Dtype>(i, j, 0));
}
}
}
get_score_gpu(input, merge_score);
//check for eq
CHECK_EQ(pair.size(), merge_score->count());
for(int i = 0; i < pair.size(); ++i) {
pair[i].score = merge_score->cpu_data()[i];
}
//__asm__("int $3");
delete input;
delete merge_score;
time.Stop();
DLOG(INFO)<<"Initial cal pairs "<<time.MilliSeconds() << "ms.";
//__asm__("int $3");
//LOG(INFO)<<"modify the tree...............";
//
CPUTimer timer;
timer.Start();
if(this->layer_param_.phase() == caffe::TEST){
build_tree_gpu(tree, seg_class, adj, pair, top_data, top_label, true, true);
feature += seg_size * dim;
vector<int> ret = get_recall(tree, true);
//__asm__("int $3");
hit_1 += ret[0];
hit_2 += ret[1];
total_gt += gt.size() / 4;
//destroy this tree
for(int j = 0; j < tree.size(); j++){
delete tree[j]->bottom[0];
if(!tree[j]->is_leaf)
delete tree[j]->bottom[1];
delete tree[j];
}
continue;
}
loss = 0.0;
//
build_tree_gpu(tree, seg_class, adj, pair, top_data, top_label, true, false);
//LOG(INFO)<<"BUILD CORRECT TREE FINISHED!";
DLOG(INFO)<<debug(tree[total_num-1])<<" should equal to "<<total_num;
//__asm__("int $3");
build_tree_gpu(tree, seg_class, adj, pair, top_data, top_label, false, false);
DLOG(INFO)<<debug(tree[tree.size()-1])<<" should equal to "<<total_num;
timer.Stop();
DLOG(INFO)<<"RNN forward cost "<<timer.MilliSeconds() <<"ms.";
tree_loss.push_back(loss / (seg_size - 1));
//total_loss += log(1 + exp(loss / (seg_size - 1)));
total_loss += loss;
vector<int> ret = get_recall(tree, false);
//__asm__("int $3");
hit_1 += ret[0];
hit_2 += ret[1];
hit_3 += ret[2];
hit_4 += ret[3];
total_gt += gt.size() / 4;
forest.push_back(tree);
feature += seg_size * dim;
}
if(this->layer_param_.phase() == TEST)
LOG(INFO)<<"Test "<<total_loss / batch_size << " " <<hit_1<<" "<<hit_2<<" "<<" "<<hit_3<<" "<<hit_4<<" "<<total_gt;
else
LOG(INFO)<<"TRAIN "<<total_loss / batch_size <<" "<<hit_1<<" "<<hit_2<<" "<<" "<<hit_3<<" "<<hit_4<<" "<<total_gt;
top[2]->mutable_cpu_data()[0] = total_loss / batch_size / 2;
top[3]->mutable_cpu_data()[0] = (Dtype)1.0 * hit_1 / total_gt;
top[3]->mutable_cpu_data()[1] = (Dtype)1.0 * hit_2 / total_gt;
top[3]->mutable_cpu_data()[2] = (Dtype)1.0 * hit_3 / total_gt;
top[3]->mutable_cpu_data()[3] = (Dtype)1.0 * hit_4 / total_gt;
}
template<typename Dtype>
int RNNBaseLayer<Dtype>::debug(Node<Dtype>* root){
if(root->is_leaf) return 1;
int num = 1;
num += debug(root->left);
num += debug(root->right);
return num;
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
//int roi_num = bottom[0]->num();
int dim = bottom[0]->channels();
//LOG(INFO)<<"dim is ..."<<dim;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* top_diff = top[0]->gpu_diff();
//CPUTimer time;
//time.Start();
for(int i = 0; i < batch_size; i++){
selected_num = i;
vector<Node<Dtype>*> tree = forest[i];
seg_size = (tree.size() + 2) / 3;
total_num = 2 * seg_size - 1;
//first correct tree
std::stack<Node<Dtype>*> recursive;
//__asm__("int $3");
recursive.push(tree[total_num - 1]);
while(!recursive.empty()){
Node<Dtype>* top_node = recursive.top();
Backward_gpu(top_node->left, top_node->right, top_node, top_diff, true);
recursive.pop();
if(top_node->right != NULL) recursive.push(top_node->right);
if(top_node->left != NULL) recursive.push(top_node->left);
}
//LOG(INFO)<<"should enter count is "<<cnt<<" "<<total_num;
//high score tree
recursive.push(tree[tree.size() - 1]);
while(!recursive.empty()){
Node<Dtype>* top_node = recursive.top();
//LOG(INFO)<<"enter ....";
Backward_gpu(top_node->left, top_node->right, top_node, top_diff, false);
recursive.pop();
if(top_node->right != NULL) recursive.push(top_node->right);
if(top_node->left != NULL) recursive.push(top_node->left);
}
//backward
//LOG(INFO)<<".......high tree finished...";
for(int j = 0; j < seg_size; j++){
caffe_copy(dim, tree[j]->bottom[0]->gpu_diff(), bottom_diff);
bottom_diff += dim;
}
}
const Dtype* test_weight = this->blobs_[0]->cpu_data();
//__asm__("int $3");
//time.Stop();
//LOG(INFO)<<"RNN backward cost "<<time.MilliSeconds() <<"ms.";
//LOG(INFO)<<"delete all space ....";
//delete all space
for(int i = 0; i < batch_size; i++){
vector<Node<Dtype>*> tree = forest[i];
for(int j = 0; j < tree.size(); j++){
delete tree[j]->bottom[0];
if(!tree[j]->is_leaf)
delete tree[j]->bottom[1];
delete tree[j];
}
}
forest.clear();
tree_loss.clear();
}
INSTANTIATE_LAYER_GPU_FUNCS(RNNBaseLayer);
}
|
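Merge_gpu and get_score_gpu above lean on the BLAS convention that caffe_gpu_gemm computes C = alpha * op(A) * op(B) + beta * C, with op(A) of size M x K, op(B) of size K x N, and C of size M x N; passing CblasTrans for B lets the N x K weight blob act as a K x N matrix. Below is a plain-C++ reference for what the first gemm call in Merge_gpu computes: naive loops, illustrative only, not Caffe's implementation.

```cpp
#include <vector>

// Naive reference for C(M x N) = A(M x K) * B^T, where B is stored as N x K,
// matching the CblasNoTrans/CblasTrans call in Merge_gpu:
//   caffe_gpu_gemm(CblasNoTrans, CblasTrans, M, N, K, 1., joint_feature, weight, 0., out)
void gemm_nt_reference(const std::vector<float>& A,   // M*K, row-major (joint feature)
                       const std::vector<float>& B,   // N*K, row-major (the weight)
                       std::vector<float>& C,         // M*N, row-major (output feature)
                       int M, int N, int K) {
    for (int m = 0; m < M; ++m)
        for (int n = 0; n < N; ++n) {
            float acc = 0.f;
            for (int k = 0; k < K; ++k)
                acc += A[m * K + k] * B[n * K + k];   // B used transposed: B^T(k, n) = B(n, k)
            C[m * N + n] = acc;
        }
}
```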
3c8d88ee7bfc1bb7dd5421ecc31db9e8fba8a442.cu
|
#include <fstream>
#include <iostream>
#include <vector>
#include <utility>
#include "caffe/util/benchmark.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/rnn_base_layer.hpp"
#include "caffe/layers/relu_layer.hpp"
#include "caffe/layers/sigmoid_layer.hpp"
#include "caffe/filler.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
inline Dtype sigmoid(Dtype x) {
return 1. / (1. + exp(-x));
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::Merge_gpu(Node<Dtype>* left, Node<Dtype>* right, Node<Dtype>* top,
Dtype* layer_top, Dtype* layer_label, bool correct_tree, bool test){
//CPUTimer time;
//time.Start();
//M_: number of samples  K_: input feature dimension  N_: output feature dimension
int left_id = left->is_leaf ? 0: 1;
int right_id = right->is_leaf ? 0: 1;
const Dtype* left_feature = left->bottom[left_id]->gpu_data();
const Dtype* right_feature = right->bottom[right_id]->gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bias = this->blobs_[1]->gpu_data();
const Dtype* w_score = this->blobs_[2]->gpu_data();
M_ = 1;
N_ = channels_;
K_ = 2 * N_;
Blob<Dtype>* joint = new Blob<Dtype>(1, K_, 1, 1);
Dtype *joint_feature = joint->mutable_gpu_data();
caffe_copy(N_, left_feature, joint_feature);
caffe_copy(N_, right_feature, joint_feature + N_);
//compute the merged bbox [c1, r1, c2, r2]
for(int i = 0; i < 2; i++)
top->rect.push_back(std::min(left->rect[i], right->rect[i]));
for(int i = 2; i < 4; i++)
top->rect.push_back(std::max(left->rect[i], right->rect[i]));
//cal label
if(correct_tree && !test){
top->map_id = map_id_;
layer_label[map_id_] = label(gt, top->rect);
}
//allocate space for the feature
for(int i = 0; i < 2; i++){
top->bottom.push_back(new Blob<Dtype>(1, N_, 1, 1));
}
//M, N, K. A, B, C;
//A = M * K, B = K * N, C = M * N
//weight: N * K
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
joint_feature, weight, (Dtype)0., top->bottom[0]->mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
bias_multiplier_.gpu_data(), bias, (Dtype)1., top->bottom[0]->mutable_gpu_data());
delete joint;
// relu
relu_bottom_vec_.clear();
relu_top_vec_.clear();
relu_bottom_vec_.push_back(top->bottom[0]);
relu_top_vec_.push_back(top->bottom[1]);
relu_layer_->Forward(relu_bottom_vec_, relu_top_vec_);
//sigmoid
// sigmoid_bottom_vec_.clear();
// sigmoid_top_vec_.clear();
// sigmoid_bottom_vec_.push_back(top->bottom[0]);
// sigmoid_top_vec_.push_back(top->bottom[1]);
// sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
//copy to the top data
if(correct_tree && !test) {
layer_top += map_id_ * channels_;
caffe_copy(channels_, top->bottom[1]->mutable_gpu_data(), layer_top);
map_id_ ++;
}
//compute merge score
//w_score: 1 * N
M_ = 1;
K_ = channels_;
N_ = 1;
Blob<Dtype> score;
score.Reshape(1,1,1,1);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
top->bottom[1]->gpu_data(), w_score, (Dtype)0., score.mutable_gpu_data());
//sigmoid layer
top->merge_score = score.cpu_data()[0];
//link up the left and right children
top->left = left;
top->right = right;
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::Backward_gpu(Node<Dtype> *left, Node<Dtype> *right, Node<Dtype> *top, const Dtype* layer_top, bool correct_tree){
//CPUTimer time;
//time.Start();
if(top->is_leaf && !correct_tree)
return;
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* w_score = this->blobs_[2]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
Dtype* w_score_diff = this->blobs_[2]->mutable_gpu_diff();
//gradient of the softmax with respect to the feature
if(correct_tree){
//layer_top += top->map_id * channels_;
//__asm__("int $3");
if(top->is_leaf) {
//caffe_gpu_add(channels_, layer_top, top->bottom[0]->gpu_diff(), top->bottom[0]->mutable_gpu_diff());
return;
}
/*
else
caffe_gpu_add(channels_, layer_top, top->bottom[1]->gpu_diff(), top->bottom[1]->mutable_gpu_diff());
*/
}
// gradient of merge_score with respect to the feature
M_ = 1;
K_ = channels_;
N_ = 1;
//loss = (largest_tree_score - correct_tree_score) / batch_size
//Dtype score = top->merge_score;
Dtype value = -1.0/batch_size;
//Dtype tmp = 1.0;
/*tmp = tmp * exp(tree_loss[selected_num]);
tmp = tmp /(1 + tmp);*/
//tmp = 100 * tmp / (seg_size - 1);
/* value = value * tmp;
if(rand() % 20000 == 0)
LOG(INFO) << tmp;*/
if(!correct_tree) {
//__asm__("int $3");
value *= -1;
}
//value = value + 0.01 * score / batch_size;
Blob<Dtype> Diff;
Diff.Reshape(1, 1, 1, 1);
Dtype* diff = Diff.mutable_cpu_data();
diff[0] = value;
//may be accelerated by other function
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
Diff.gpu_data(), top->bottom[1]->gpu_data(), (Dtype)1., w_score_diff);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
Diff.gpu_data(), w_score, (Dtype)1., top->bottom[1]->mutable_gpu_diff());
//relu
relu_bottom_vec_.clear();
relu_top_vec_.clear();
relu_bottom_vec_.push_back(top->bottom[0]);
relu_top_vec_.push_back(top->bottom[1]);
vector<bool> propaget_down(1, true);
relu_layer_->Backward(relu_top_vec_, propaget_down, relu_bottom_vec_);
// sigmoid_bottom_vec_.clear();
// sigmoid_top_vec_.clear();
// sigmoid_bottom_vec_.push_back(top->bottom[0]);
// sigmoid_top_vec_.push_back(top->bottom[1]);
// vector<bool> propaget_down(1, true);
// sigmoid_layer_->Backward(sigmoid_top_vec_, propaget_down, sigmoid_bottom_vec_);
//now backpropagate to the two child nodes
//be careful with the others
int left_id = left->is_leaf ? 0: 1;
int right_id = right->is_leaf ? 0: 1;
const Dtype* left_feature = left->bottom[left_id]->gpu_data();
const Dtype* right_feature = right->bottom[right_id]->gpu_data();
Blob<Dtype>* joint = new Blob<Dtype>(1, 2*channels_, 1, 1);
Dtype *joint_feature = joint->mutable_gpu_data();
//Dtype *joint_feature = new Dtype[2*channels_];
caffe_copy(channels_, left_feature, joint_feature);
caffe_copy(channels_, right_feature, joint_feature + channels_);
M_ = 1;
N_ = channels_;
K_ = 2 * N_;
//LOG(INFO)<<"ENTER .................";
//compute the partial derivatives with respect to weight and bias
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
top->bottom[0]->gpu_diff(), joint_feature, (Dtype)1., weight_diff);
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top->bottom[0]->gpu_diff(),
bias_multiplier_.gpu_data(), (Dtype)1., bias_diff);
//compute the partial derivative with respect to delta
//Dtype* delta = new Dtype[K_];
Dtype* delta = joint->mutable_gpu_diff();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
top->bottom[0]->gpu_diff(), weight, (Dtype)0., delta);
caffe_gpu_add(N_, delta, left->bottom[left_id]->gpu_diff(), left->bottom[left_id]->mutable_gpu_diff());
caffe_gpu_add(N_, delta + N_, right->bottom[right_id]->gpu_diff(), right->bottom[right_id]->mutable_gpu_diff());
delete joint;
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::get_score_gpu(const Blob<Dtype>* input, Blob<Dtype>* merge_score) {
//input blobs
CPUTimer time;
time.Start();
const Dtype* weight = this->blobs_[0]->gpu_data();
//__asm__("int $3");
const Dtype* bias = this->blobs_[1]->gpu_data();
const Dtype* w_score = this->blobs_[2]->gpu_data();
M_ = input->num();
N_ = channels_;
K_ = 2 * N_;
Blob<Dtype>* out = new Blob<Dtype>(M_, N_, 1, 1);
const Dtype* joint_feature = input->gpu_data();
Dtype* out_feature = out->mutable_gpu_data();
//M, N, K. A, B, C;
//A = M * K, B = K * N, C = M * N
//weight: N * K
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
joint_feature, weight, (Dtype)0., out_feature);
//notice the matrix dimension
Blob<Dtype> matrix_bias_multiplier_;
matrix_bias_multiplier_.Reshape(1, 1, 1, M_);
caffe_set(matrix_bias_multiplier_.count(), Dtype(1), matrix_bias_multiplier_.mutable_cpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
matrix_bias_multiplier_.gpu_data(), bias, (Dtype)1., out_feature);
relu_bottom_vec_.clear();
relu_top_vec_.clear();
relu_bottom_vec_.push_back(out);
relu_top_vec_.push_back(out);
relu_layer_->Forward(relu_bottom_vec_, relu_top_vec_);
// sigmoid_bottom_vec_.clear();
// sigmoid_top_vec_.clear();
// sigmoid_bottom_vec_.push_back(out);
// sigmoid_top_vec_.push_back(out);
// sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
M_ = input->num();
K_ = channels_;
N_ = 1;
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
out_feature, w_score, (Dtype)0., merge_score->mutable_gpu_data());
delete out;
time.Stop();
//LOG(INFO)<<"get score per cost "<<time.MilliSeconds()<<".ms";
return ;
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::build_tree_gpu(vector<Node<Dtype>*> &tree, vector<int> seg_class, vector<bool> adj,
vector<Pair<Dtype> > pair, Dtype* layer_top, Dtype* layer_label, bool correct_tree, bool test) {
CPUTimer time;
vector<Pair<Dtype> > pair_1;
pair_1.clear();
//time.Start();
CHECK_EQ(seg_class.size(), seg_size);
//__asm__("int $3");
//LOG(INFO)<<"loss per error is "<<loss_per_error_;
for(int i = 0; i < pair.size(); i++) {
int l_id = pair[i].l, r_id = pair[i].r;
if(!correct_tree){
pair[i].score += loss_per_error_ * (1 - Is_oneclass(l_id, r_id, seg_class));
// LOG(INFO)<<i<<" "<<pair[i].score;
}
}
for(int k = seg_size; k < total_num; ++k) {
// time.Start();
sort(pair.begin(), pair.end());
int max_id = 0;
if(correct_tree && !test){
for(int i = 0; i < pair.size(); i++){
if(Is_oneclass(pair[i].l, pair[i].r, seg_class)){
max_id = i;
break;
}
}
}
if(correct_tree) loss -= pair[max_id].score;
else loss += pair[max_id].score;
int left_id = pair[max_id].l, right_id = pair[max_id].r;
//find the highest-scoring pair
tree.push_back(new Node<Dtype>());
//merge the node
if(correct_tree)
Merge_gpu(tree[left_id], tree[right_id], tree[tree.size()-1], layer_top, layer_label, correct_tree, test);
else{
//To reuse the adj space and the other buffers
//we follow the order: first the correct tree, then the highest-scoring tree;
//be careful with l_id and r_id.
int l_id = left_id >= seg_size && left_id < total_num ? left_id + seg_size - 1 : left_id;
int r_id = right_id >= seg_size && right_id < total_num ? right_id + seg_size - 1 : right_id;
Merge_gpu(tree[l_id], tree[r_id], tree[tree.size()-1], layer_top, layer_label, correct_tree, test);
}
//update current class
if(Is_oneclass(left_id, right_id, seg_class))
seg_class.push_back(seg_class[left_id]);
else
seg_class.push_back(-1);
/*if(rand() % 5000 == 0 && !test){
if(seg_class[seg_class.size()-1] == -1 && !correct_tree)
LOG(INFO)<<"Wrong merge "<<pair[max_id].score<<" loss per error "<<loss_per_error_;
else
LOG(INFO)<<"Correct merge "<<pair[max_id].score<<" loss per error "<<loss_per_error_;
}*/
//remove pairs that no longer exist
for(int i = 0; i < pair.size(); i++){
if(pair[i].l == left_id || pair[i].l == right_id
|| pair[i].r == left_id || pair[i].r == right_id)
continue;
pair_1.push_back(pair[i]);
}
//update adj matrix
vector<int> tmp;
for(int i = 0; i < total_num; i++){
if(adj[go(left_id, i)] || adj[go(right_id, i)])
tmp.push_back(i);
}
for(int i = 0; i < tmp.size(); i++){
adj[go(k,tmp[i])] = adj[go(tmp[i],k)] = true;
}
for(int i = 0; i < total_num; i++){
adj[go(left_id,i)] = adj[go(i,left_id)] = false;
adj[go(right_id,i)] = adj[go(i,right_id)] = false;
}
//update merge score
time.Start();
int cnt = 0;
for(int i = 0; i < total_num ; i++){
if(adj[go(k,i)])
cnt ++;
}
//for 2 direction
cnt *= 2;
if(cnt == 0) continue;
Blob<Dtype> input, merge_score;
//__asm__("int $3");
input.Reshape(cnt, 2*channels_, 1, 1);
merge_score.Reshape(cnt, 1, 1, 1);
Dtype* input_feature = input.mutable_gpu_data();
for(int i = 0; i < total_num; ++i) {
if(adj[go(k,i)]){
int id = i;
if(!correct_tree)
id = i >= seg_size && i < total_num ? i + seg_size - 1 : i;
int l_id = tree[tree.size()-1]->is_leaf ? 0: 1;
int r_id = tree[id]->is_leaf ? 0: 1;
const Dtype* feat1 = tree[tree.size()-1]->bottom[l_id]->gpu_data();
const Dtype* feat2 = tree[id]->bottom[r_id]->gpu_data();
caffe_copy(channels_, feat1, input_feature);
caffe_copy(channels_, feat2, input_feature + channels_);
input_feature += input.offset(1);
caffe_copy(channels_, feat2, input_feature);
caffe_copy(channels_, feat1, input_feature + channels_);
input_feature += input.offset(1);
}
}
//__asm__("int $3");
get_score_gpu(&input, &merge_score);
const Dtype* score = merge_score.cpu_data();
int id = 0;
for(int i = 0; i < total_num; ++i) {
if(adj[go(k,i)]){
Dtype score_1 = score[id++], score_2 = score[id++];
//time.Stop();
//LOG(INFO)<<"get score cost "<<time.MilliSeconds()<<"ms.";
if(!correct_tree){
score_1 += loss_per_error_ * (1 - Is_oneclass(k, i, seg_class));
score_2 += loss_per_error_ * (1 - Is_oneclass(i, k, seg_class));
}
pair_1.push_back(Pair<Dtype>(k, i, score_1));
pair_1.push_back(Pair<Dtype>(i, k, score_2));
}
}
time.Stop();
//LOG(INFO)<<"update get_score is "<<time.MilliSeconds()<<"ms.";
pair.clear();
pair = pair_1;
pair_1.clear();
}
//__asm__("int $3");
/*
if(!correct_tree) {
for(int k = 0; k < seg_class.size(); ++k){
LOG(INFO)<<seg_class[k];
}
__asm__("int $3");
}*/
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
//bottom[0]->[roi_num, dim, 1, 1]---->feature
//bottom[1]->[batch_size, 1, 1]----->id for each image
//bottom[2]->[roi_num, 5, 1, 1]------>roi_num coordinate
//blobs produced by the merges
//top[0]->[roi, dim, 1, 1]
//top[1]->gt
//LOG(INFO)<<"Enter into the RNN layer............";
int roi_num = bottom[0]->num();
int dim = bottom[0]->channels();
int out_num = 2 * roi_num - batch_size;
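// Worked example with hypothetical sizes: roi_num = 10 leaf regions over batch_size = 2
// images yields roi_num - batch_size = 8 internal merge nodes, so out_num = 2*10 - 2 = 18.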
//LOG(INFO)<<"batch_size is "<<batch_size;
top[0]->Reshape(out_num, dim, 1, 1);
top[1]->Reshape(out_num, 1, 1, 1);
const Dtype* feature = bottom[0]->gpu_data();
const Dtype* id = bottom[1]->cpu_data();
const Dtype* roi = bottom[2]->cpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
//note: the label blob is written on the CPU
Dtype* top_label = top[1]->mutable_cpu_data();
//reset the map id to 0
map_id_ = 0;
//tree loss is set to 0
loss = (Dtype)0.;
int hit_1 = 0, hit_2 = 0, hit_3 = 0, hit_4 = 0, total_gt = 0;
// __asm__("int $3");
Dtype total_loss = 0;
tree_loss.clear();
for(int batch_id = 0; batch_id < batch_size; batch_id ++){
//get segmentation size
int index = id[batch_id];
//read gt
gt = ReadFileToVector(gt_folder + lines_[index] + ".txt");
//read adj
vec_adj = ReadFileToVector(adj_folder + lines_[index] + ".txt");
//read class
seg_class = ReadFileToVector(class_folder + lines_[index] + ".txt");
seg_size = seg_class.size();
//__asm__("int $3");
vector<Node<Dtype>*> tree;
tree.clear();
//CPUTimer timer;
//timer.Start();
total_num = 2 * seg_size - 1;
DLOG(INFO)<<"Forward seg_size is "<<seg_size;
for(int i = 0; i < seg_size; i++){
tree.push_back(new Node<Dtype>());
tree[i]->bottom.push_back(new Blob<Dtype>(1, dim, 1, 1));
//LOG(INFO)<<"map_id_ should be -1"<<tree[i]->map_id;
//LOG(INFO)<<"is_leaf should be false"<<tree[i]->is_leaf;
tree[i]->map_id = map_id_;
tree[i]->is_leaf = true;
Dtype* src = tree[i]->bottom[0]->mutable_gpu_data();
caffe_copy(dim, feature + i * dim, src);
caffe_copy(dim, feature + i * dim, top_data + map_id_ * channels_);
for(int j = 1; j <= 4; ++j) tree[i]->rect.push_back(roi[j]);
top_label[map_id_] = label(gt, tree[i]->rect);
roi += bottom[2]->offset(1);
map_id_ ++;
}
vector<bool> adj(total_num * total_num, false);
int cnt = 0;
// read adj matrix
for(int i = 0; i < seg_size; i++){
for(int j = 0; j < seg_size; j++){
int offset = i * seg_size + j;
if(vec_adj[offset] == 1) {
adj[go(i,j)] = true;
cnt ++;
}
}
}
vector<Pair<Dtype> > pair;
pair.clear();
//__asm__("int $3");
CPUTimer time;
time.Start();
Blob<Dtype>* input = new Blob<Dtype>(cnt, 2 * channels_, 1, 1);
Blob<Dtype>* merge_score = new Blob<Dtype>(cnt, 1, 1, 1);
Dtype* input_feature = input->mutable_gpu_data();
for(int i = 0; i < seg_size; ++i){
for(int j = 0; j < seg_size; ++j){
const Dtype* feat1 = feature + i * dim;
const Dtype* feat2 = feature + j * dim;
if(adj[go(i,j)]){
caffe_copy(channels_, feat1, input_feature);
caffe_copy(channels_, feat2, input_feature + channels_);
//__asm__("int $3");
input_feature += 2 * channels_;
pair.push_back(Pair<Dtype>(i, j, 0));
}
}
}
get_score_gpu(input, merge_score);
//check for eq
CHECK_EQ(pair.size(), merge_score->count());
for(int i = 0; i < pair.size(); ++i) {
pair[i].score = merge_score->cpu_data()[i];
}
//__asm__("int $3");
delete input;
delete merge_score;
time.Stop();
DLOG(INFO)<<"Initial cal pairs "<<time.MilliSeconds() << "ms.";
//__asm__("int $3");
//LOG(INFO)<<"modify the tree...............";
//the highest-scoring tree among those with the correct structure
CPUTimer timer;
timer.Start();
if(this->layer_param_.phase() == caffe::TEST){
build_tree_gpu(tree, seg_class, adj, pair, top_data, top_label, true, true);
feature += seg_size * dim;
vector<int> ret = get_recall(tree, true);
//__asm__("int $3");
hit_1 += ret[0];
hit_2 += ret[1];
total_gt += gt.size() / 4;
//destroy this tree
for(int j = 0; j < tree.size(); j++){
delete tree[j]->bottom[0];
if(!tree[j]->is_leaf)
delete tree[j]->bottom[1];
delete tree[j];
}
continue;
}
loss = 0.0;
//the highest-scoring tree overall
build_tree_gpu(tree, seg_class, adj, pair, top_data, top_label, true, false);
//LOG(INFO)<<"BUILD CORRECT TREE FINISHED!";
DLOG(INFO)<<debug(tree[total_num-1])<<" should equal to "<<total_num;
//__asm__("int $3");
build_tree_gpu(tree, seg_class, adj, pair, top_data, top_label, false, false);
DLOG(INFO)<<debug(tree[tree.size()-1])<<" should equal to "<<total_num;
timer.Stop();
DLOG(INFO)<<"RNN forward cost "<<timer.MilliSeconds() <<"ms.";
tree_loss.push_back(loss / (seg_size - 1));
//total_loss += log(1 + exp(loss / (seg_size - 1)));
total_loss += loss;
vector<int> ret = get_recall(tree, false);
//__asm__("int $3");
hit_1 += ret[0];
hit_2 += ret[1];
hit_3 += ret[2];
hit_4 += ret[3];
total_gt += gt.size() / 4;
forest.push_back(tree);
feature += seg_size * dim;
}
if(this->layer_param_.phase() == TEST)
LOG(INFO)<<"Test "<<total_loss / batch_size << " " <<hit_1<<" "<<hit_2<<" "<<" "<<hit_3<<" "<<hit_4<<" "<<total_gt;
else
LOG(INFO)<<"TRAIN "<<total_loss / batch_size <<" "<<hit_1<<" "<<hit_2<<" "<<" "<<hit_3<<" "<<hit_4<<" "<<total_gt;
top[2]->mutable_cpu_data()[0] = total_loss / batch_size / 2;
top[3]->mutable_cpu_data()[0] = (Dtype)1.0 * hit_1 / total_gt;
top[3]->mutable_cpu_data()[1] = (Dtype)1.0 * hit_2 / total_gt;
top[3]->mutable_cpu_data()[2] = (Dtype)1.0 * hit_3 / total_gt;
top[3]->mutable_cpu_data()[3] = (Dtype)1.0 * hit_4 / total_gt;
}
template<typename Dtype>
int RNNBaseLayer<Dtype>::debug(Node<Dtype>* root){
if(root->is_leaf) return 1;
int num = 1;
num += debug(root->left);
num += debug(root->right);
return num;
}
template<typename Dtype>
void RNNBaseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
//int roi_num = bottom[0]->num();
int dim = bottom[0]->channels();
//LOG(INFO)<<"dim is ..."<<dim;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* top_diff = top[0]->gpu_diff();
//CPUTimer time;
//time.Start();
for(int i = 0; i < batch_size; i++){
selected_num = i;
vector<Node<Dtype>*> tree = forest[i];
seg_size = (tree.size() + 2) / 3;
total_num = 2 * seg_size - 1;
//first correct tree
std::stack<Node<Dtype>*> recursive;
//__asm__("int $3");
recursive.push(tree[total_num - 1]);
while(!recursive.empty()){
Node<Dtype>* top_node = recursive.top();
Backward_gpu(top_node->left, top_node->right, top_node, top_diff, true);
recursive.pop();
if(top_node->right != NULL) recursive.push(top_node->right);
if(top_node->left != NULL) recursive.push(top_node->left);
}
//LOG(INFO)<<"should enter count is "<<cnt<<" "<<total_num;
//high score tree
recursive.push(tree[tree.size() - 1]);
while(!recursive.empty()){
Node<Dtype>* top_node = recursive.top();
//LOG(INFO)<<"enter ....";
Backward_gpu(top_node->left, top_node->right, top_node, top_diff, false);
recursive.pop();
if(top_node->right != NULL) recursive.push(top_node->right);
if(top_node->left != NULL) recursive.push(top_node->left);
}
//backward
//LOG(INFO)<<".......high tree finished...";
for(int j = 0; j < seg_size; j++){
caffe_copy(dim, tree[j]->bottom[0]->gpu_diff(), bottom_diff);
bottom_diff += dim;
}
}
const Dtype* test_weight = this->blobs_[0]->cpu_data();
//__asm__("int $3");
//time.Stop();
//LOG(INFO)<<"RNN backward cost "<<time.MilliSeconds() <<"ms.";
//LOG(INFO)<<"delete all space ....";
//delete all space
for(int i = 0; i < batch_size; i++){
vector<Node<Dtype>*> tree = forest[i];
for(int j = 0; j < tree.size(); j++){
delete tree[j]->bottom[0];
if(!tree[j]->is_leaf)
delete tree[j]->bottom[1];
delete tree[j];
}
}
forest.clear();
tree_loss.clear();
}
INSTANTIATE_LAYER_GPU_FUNCS(RNNBaseLayer);
}
|
fc89c998d0c46f47a53262c46c5f5ad150684368.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include <gdf/gdf.h>
#include <gdf/errorutils.h>
#include "gdf/gdf_io.h"
#include "rmm.h"
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include "../../thrust_rmm_allocator.h"
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, gdf_valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows);
__global__ void determineValidRecCount(gdf_valid_type *validArray, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset);
template<typename T>
__device__ T convertDataElement(gdf_column *gdf, int idx, gdf_dtype dtype);
__device__ int whichBitmapCSR(int record) { return (record/8); }
__device__ int whichBitCSR(int bit) { return (bit % 8); }
__device__ int checkBitCSR(gdf_valid_type data, int bit) {
gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
return (data & bitMask[bit]);
}
//
//------------------------------------------------------------
//
/*
* Convert a Dense GDF into a CSR GDF
*
* Restrictions: All columns need to be of the same length
*/
/**
* @brief convert a GDF into a CSR
*
* Take a matrix in GDF format and convert it into a CSR. The column-major matrix needs to have every column defined.
* Passing in a COO dataset will be treated as a two-column matrix
*
* @param[in] gdfData the ordered list of columns
* @param[in] numCol the number of columns in the gdfData array
*
* @param[out] csrReturn a pointer to the returned data structure
*
* @return gdf_error code
*/
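// A minimal usage sketch (hypothetical columns col_a/col_b; error handling elided):
//
//   gdf_column* cols[2] = { &col_a, &col_b };   // equal-length device columns
//   csr_gdf csr;
//   gdf_error err = gdf_to_csr(cols, 2, &csr);
//   // on GDF_SUCCESS: csr.IA holds the row offsets, csr.JA the csr.nnz column ids,
//   // and csr.A the csr.nnz values promoted to the widest input dtype (csr.dtype).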
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
int64_t numNull = 0;
int64_t nnz = 0;
gdf_size_type numRows = gdfData[0]->size;
gdf_dtype dType = gdf_dtype::GDF_invalid; // the data type to make the CSR element array (A)
/**
* Currently the gdf_dtype enum is arranged based on data size, as long as it stays that way the enum values can be
* exploited by just picking the largest enum value
*
* While looping, also get the number of null values (this will work one day)
*/
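// e.g. (assuming the size-ordered enum holds) a mix of GDF_INT8 and GDF_FLOAT64 columns
// resolves dType to GDF_FLOAT64, so every value is widened to double in the output A array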
for ( int x =0; x < numCol; x++) {
if( gdfData[x]->dtype > dType)
dType = gdfData[x]->dtype;
numNull += gdfData[x]->null_count;
}
if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING )
return gdf_error::GDF_UNSUPPORTED_DTYPE;
// the number of valid elements is simply the max number of possible elements (rows * columns) minus the number of nulls
// the current problem is that algorithms are not setting null_count;
// gdf_size_type is 32 bits (int) but the total size could be larger than an int, so use a long
nnz = (numRows * numCol) - numNull;
// Allocate space for the offset - this will eventually be IA - dtype is long since the sum of all column elements could be larger than int32
gdf_size_type * offsets;
RMM_TRY(rmmAlloc((void**)&offsets, (numRows + 2) * sizeof(int64_t), 0)); // TODO: non-default stream?
CUDA_TRY(hipMemset(offsets, 0, ( sizeof(int64_t) * (numRows + 2) ) ));
// do a pass over each column, and have each column update the row count
//-- threads and blocks
int threads = 1024;
int blocks = (numRows + threads - 1) / threads;
for ( int x = 0; x < numCol; x++ ) {
hipLaunchKernelGGL(( determineValidRecCount), dim3(blocks), dim3(threads), 0, 0, gdfData[x]->valid, numRows, numCol, offsets);
}
rmm_temp_allocator allocator(0); // TODO: non-default stream?
//--------------------------------------------------------------------------------------
// Now do an exclusive scan to compute the offsets for where to write data
thrust::exclusive_scan(thrust::hip::par(allocator).on(0), offsets, (offsets + numRows + 1), offsets);
//--------------------------------------------------------------------------------------
// get the number of elements - NNZ, this is the last item in the array
CUDA_TRY( hipMemcpy((void *)&nnz, (void *)&offsets[numRows], sizeof(int64_t), hipMemcpyDeviceToHost) );
if ( nnz == 0)
return GDF_CUDA_ERROR;
//--------------------------------------------------------------------------------------
// now start creating output data
size_t * IA;
RMM_TRY(rmmAlloc((void**)&IA, (numRows + 2) * sizeof(gdf_size_type), 0));
CUDA_TRY(hipMemcpy(IA, offsets, ( sizeof(gdf_size_type) * (numRows + 2) ), hipMemcpyDeviceToDevice) );
int64_t * JA;
RMM_TRY( rmmAlloc((void**)&JA, (sizeof(int64_t) * nnz), 0));
//----------------------------------------------------------------------------------
// Now just missing A and the moving of data
csrReturn->dtype = dType;
csrReturn->rows = numRows;
csrReturn->cols = numCol;
csrReturn->dtype = dType;
csrReturn->JA = JA;
csrReturn->IA = IA;
csrReturn->nnz = nnz;
// Start processing based on data type
gdf_error status = GDF_SUCCESS;
switch(dType) {
case gdf_dtype::GDF_INT8:
status = runConverter<int8_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT16:
status = runConverter<int16_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT32:
status = runConverter<int32_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT64:
status = runConverter<int64_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT32:
status = runConverter<float>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT64:
status = runConverter<double>(gdfData, csrReturn, offsets);
break;
default:
RMM_TRY(rmmFree(IA, 0));
RMM_TRY(rmmFree(JA, 0));
RMM_TRY(rmmFree(offsets, 0));
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY(rmmFree(offsets, 0));
return status;
}
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets) {
gdf_size_type numCols = csrReturn->cols;
gdf_size_type numRows = csrReturn->rows;
//-- threads and blocks
int threads = 1024;
if ( numRows < 100 ) {
threads = 64;
} else if (numRows < 256) {
threads = 128;
} else if ( numRows < 512) {
threads = 256;
} else if ( numRows < 1024) {
threads = 512;
}
int blocks = (numRows + threads - 1) / threads;
T * A;
RMM_TRY(rmmAlloc((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
CUDA_TRY(hipMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
// Now start moving the data and creating the CSR
for ( gdf_size_type colId = 0; colId < numCols; colId++ ) {
gdf_column *gdf = gdfData[colId];
hipLaunchKernelGGL(( cudaCreateCSR<T>), dim3(blocks), dim3(threads), 0, 0, gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
CUDA_CHECK_LAST();
}
csrReturn->A = A;
return gdf_error::GDF_SUCCESS;
}
/*
* Move data over into the CSR and possibly convert the format
*/
template<typename T>
__global__ void cudaCreateCSR(
void *data, gdf_valid_type *valid, gdf_dtype dtype, int colId,
T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows)
{
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // which bitmap
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
gdf_valid_type bitmap = valid[bitmapIdx];
if ( checkBitCSR( bitmap, bitIdx) ) {
gdf_size_type offsetIdx = offsets[tid]; // where should this thread start writing data
A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
JA[offsetIdx] = colId;
++offsets[tid];
}
}
/*
* Compute the number of valid entries per row - a row spans multiple gdf_columns -
* There is one thread running per row, so just compute the sum for this row.
*
* the number of elements in a valid array is actually ceil(numRows / 8) since it is a bitmap. The total number of bits checked is equal to numRows
*
*/
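// Worked example: with numRows = 10 the bitmap spans ceil(10/8) = 2 bytes; the thread for
// row 9 reads byte whichBitmapCSR(9) = 1 and tests bit whichBitCSR(9) = 1 of that byte.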
__global__ void determineValidRecCount(gdf_valid_type *valid, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset) {
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // want the floor of the divide
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
gdf_valid_type bitmap = valid[bitmapIdx];
if (checkBitCSR( bitmap, bitIdx) )
++offset[tid];
}
/**
* Convert the data element into a common format
*/
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
T answer;
switch(dtype) {
case gdf_dtype::GDF_INT8: {
int8_t *a = (int8_t *)data;
answer = (T)(a[tid]);
break;
}
case gdf_dtype::GDF_INT16: {
int16_t *b = (int16_t *)data;
answer = (T)(b[tid]);
break;
}
case gdf_dtype::GDF_INT32: {
int32_t *c = (int32_t *)data;
answer = (T)(c[tid]);
break;
}
case gdf_dtype::GDF_INT64: {
int64_t *d = (int64_t *)data;
answer = (T)(d[tid]);
break;
}
case gdf_dtype::GDF_FLOAT32: {
float *e = (float *)data;
answer = (T)(e[tid]);
break;
}
case gdf_dtype::GDF_FLOAT64: {
double *f = (double *)data;
answer = (T)(f[tid]);
break;
}
}
return answer;
}
|
fc89c998d0c46f47a53262c46c5f5ad150684368.cu
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include <gdf/gdf.h>
#include <gdf/errorutils.h>
#include "gdf/gdf_io.h"
#include "rmm.h"
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include "../../thrust_rmm_allocator.h"
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, gdf_valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows);
__global__ void determineValidRecCount(gdf_valid_type *validArray, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset);
template<typename T>
__device__ T convertDataElement(gdf_column *gdf, int idx, gdf_dtype dtype);
__device__ int whichBitmapCSR(int record) { return (record/8); }
__device__ int whichBitCSR(int bit) { return (bit % 8); }
__device__ int checkBitCSR(gdf_valid_type data, int bit) {
gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
return (data & bitMask[bit]);
}
//
//------------------------------------------------------------
//
/*
* Convert a Dense GDF into a CSR GDF
*
* Restrictions: All columns need to be of the same length
*/
/**
* @brief convert a GDF into a CSR
*
* Take a matrix in GDF format and convert it into a CSR. The column-major matrix needs to have every column defined.
* Passing in a COO dataset will be treated as a two-column matrix
*
* @param[in] gdfData the ordered list of columns
* @param[in] numCol the number of columns in the gdfData array
*
* @param[out] csrReturn a pointer to the returned data structure
*
* @return gdf_error code
*/
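// A minimal usage sketch (hypothetical columns col_a/col_b; error handling elided):
//
//   gdf_column* cols[2] = { &col_a, &col_b };   // equal-length device columns
//   csr_gdf csr;
//   gdf_error err = gdf_to_csr(cols, 2, &csr);
//   // on GDF_SUCCESS: csr.IA holds the row offsets, csr.JA the csr.nnz column ids,
//   // and csr.A the csr.nnz values promoted to the widest input dtype (csr.dtype).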
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
int64_t numNull = 0;
int64_t nnz = 0;
gdf_size_type numRows = gdfData[0]->size;
gdf_dtype dType = gdf_dtype::GDF_invalid; // the data type to make the CSR element array (A)
/**
* Currently the gdf_dtype enum is arranged based on data size, as long as it stays that way the enum values can be
* exploited by just picking the largest enum value
*
* While looping, also get the number of null values (this will work one day)
*/
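// e.g. (assuming the size-ordered enum holds) a mix of GDF_INT8 and GDF_FLOAT64 columns
// resolves dType to GDF_FLOAT64, so every value is widened to double in the output A array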
for ( int x =0; x < numCol; x++) {
if( gdfData[x]->dtype > dType)
dType = gdfData[x]->dtype;
numNull += gdfData[x]->null_count;
}
if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING )
return gdf_error::GDF_UNSUPPORTED_DTYPE;
// the number of valid elements is simply the max number of possible elements (rows * columns) minus the number of nulls
// the current problem is that algorithms are not setting null_count;
// gdf_size_type is 32 bits (int) but the total size could be larger than an int, so use a long
nnz = (numRows * numCol) - numNull;
// Allocate space for the offset - this will eventually be IA - dtype is long since the sum of all column elements could be larger than int32
gdf_size_type * offsets;
RMM_TRY(rmmAlloc((void**)&offsets, (numRows + 2) * sizeof(int64_t), 0)); // TODO: non-default stream?
CUDA_TRY(cudaMemset(offsets, 0, ( sizeof(int64_t) * (numRows + 2) ) ));
// do a pass over each column, and have each column update the row count
//-- threads and blocks
int threads = 1024;
int blocks = (numRows + threads - 1) / threads;
for ( int x = 0; x < numCol; x++ ) {
determineValidRecCount<<<blocks, threads>>>(gdfData[x]->valid, numRows, numCol, offsets);
}
rmm_temp_allocator allocator(0); // TODO: non-default stream?
//--------------------------------------------------------------------------------------
// Now do an exclusive scan to compute the offsets for where to write data
thrust::exclusive_scan(thrust::cuda::par(allocator).on(0), offsets, (offsets + numRows + 1), offsets);
//--------------------------------------------------------------------------------------
// get the number of elements - NNZ, this is the last item in the array
CUDA_TRY( cudaMemcpy((void *)&nnz, (void *)&offsets[numRows], sizeof(int64_t), cudaMemcpyDeviceToHost) );
if ( nnz == 0)
return GDF_CUDA_ERROR;
//--------------------------------------------------------------------------------------
// now start creating output data
size_t * IA;
RMM_TRY(rmmAlloc((void**)&IA, (numRows + 2) * sizeof(gdf_size_type), 0));
CUDA_TRY(cudaMemcpy(IA, offsets, ( sizeof(gdf_size_type) * (numRows + 2) ), cudaMemcpyDeviceToDevice) );
int64_t * JA;
RMM_TRY( rmmAlloc((void**)&JA, (sizeof(int64_t) * nnz), 0));
//----------------------------------------------------------------------------------
// Now just missing A and the moving of data
csrReturn->dtype = dType;
csrReturn->rows = numRows;
csrReturn->cols = numCol;
csrReturn->dtype = dType;
csrReturn->JA = JA;
csrReturn->IA = IA;
csrReturn->nnz = nnz;
// Start processing based on data type
gdf_error status = GDF_SUCCESS;
switch(dType) {
case gdf_dtype::GDF_INT8:
status = runConverter<int8_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT16:
status = runConverter<int16_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT32:
status = runConverter<int32_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT64:
status = runConverter<int64_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT32:
status = runConverter<float>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT64:
status = runConverter<double>(gdfData, csrReturn, offsets);
break;
default:
RMM_TRY(rmmFree(IA, 0));
RMM_TRY(rmmFree(JA, 0));
RMM_TRY(rmmFree(offsets, 0));
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY(rmmFree(offsets, 0));
return status;
}
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets) {
gdf_size_type numCols = csrReturn->cols;
gdf_size_type numRows = csrReturn->rows;
//-- threads and blocks
int threads = 1024;
if ( numRows < 100 ) {
threads = 64;
} else if (numRows < 256) {
threads = 128;
} else if ( numRows < 512) {
threads = 256;
} else if ( numRows < 1024) {
threads = 512;
}
int blocks = (numRows + threads - 1) / threads;
T * A;
RMM_TRY(rmmAlloc((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
CUDA_TRY(cudaMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
// Now start moving the data and creating the CSR
for ( gdf_size_type colId = 0; colId < numCols; colId++ ) {
gdf_column *gdf = gdfData[colId];
cudaCreateCSR<T><<<blocks, threads>>>(gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
CUDA_CHECK_LAST();
}
csrReturn->A = A;
return gdf_error::GDF_SUCCESS;
}
/*
* Move data over into the CSR and possibly convert the format
*/
template<typename T>
__global__ void cudaCreateCSR(
void *data, gdf_valid_type *valid, gdf_dtype dtype, int colId,
T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows)
{
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // which bitmap
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
gdf_valid_type bitmap = valid[bitmapIdx];
if ( checkBitCSR( bitmap, bitIdx) ) {
gdf_size_type offsetIdx = offsets[tid]; // where should this thread start writing data
A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
JA[offsetIdx] = colId;
++offsets[tid];
}
}
/*
* Compute the number of valid entries per row - a row spans multiple gdf_columns -
* There is one thread running per row, so just compute the sum for this row.
*
* the number of elements in a valid array is actually ceil(numRows / 8) since it is a bitmap. The total number of bits checked is equal to numRows
*
*/
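// Worked example: with numRows = 10 the bitmap spans ceil(10/8) = 2 bytes; the thread for
// row 9 reads byte whichBitmapCSR(9) = 1 and tests bit whichBitCSR(9) = 1 of that byte.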
__global__ void determineValidRecCount(gdf_valid_type *valid, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset) {
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // want the floor of the divide
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
gdf_valid_type bitmap = valid[bitmapIdx];
if (checkBitCSR( bitmap, bitIdx) )
++offset[tid];
}
/**
* Convert the data element into a common format
*/
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
T answer;
switch(dtype) {
case gdf_dtype::GDF_INT8: {
int8_t *a = (int8_t *)data;
answer = (T)(a[tid]);
break;
}
case gdf_dtype::GDF_INT16: {
int16_t *b = (int16_t *)data;
answer = (T)(b[tid]);
break;
}
case gdf_dtype::GDF_INT32: {
int32_t *c = (int32_t *)data;
answer = (T)(c[tid]);
break;
}
case gdf_dtype::GDF_INT64: {
int64_t *d = (int64_t *)data;
answer = (T)(d[tid]);
break;
}
case gdf_dtype::GDF_FLOAT32: {
float *e = (float *)data;
answer = (T)(e[tid]);
break;
}
case gdf_dtype::GDF_FLOAT64: {
double *f = (double *)data;
answer = (T)(f[tid]);
break;
}
}
return answer;
}
|
f9d8b250da782de706f7c968d940ff02be5205f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void drawHeart(int CIRCLE_SEGMENTS, float *xx, float*yy) {
float scale = 0.5f;
int i = threadIdx.y*CIRCLE_SEGMENTS + threadIdx.x;
float const theta = 2.0f * 3.1415926f * (float)i / (float)CIRCLE_SEGMENTS;
xx[i] = scale * 16.0f * sinf(theta) * sinf(theta) * sinf(theta);
yy[i] = -1 * scale * (13.0f * cosf(theta) - 5.0f * cosf(2.0f * theta) - 2 * cosf(3.0f * theta) - cosf(4.0f * theta));
}
|
f9d8b250da782de706f7c968d940ff02be5205f1.cu
|
#include "includes.h"
__global__ void drawHeart(int CIRCLE_SEGMENTS, float *xx, float*yy) {
float scale = 0.5f;
int i = threadIdx.y*CIRCLE_SEGMENTS + threadIdx.x;
float const theta = 2.0f * 3.1415926f * (float)i / (float)CIRCLE_SEGMENTS;
xx[i] = scale * 16.0f * sinf(theta) * sinf(theta) * sinf(theta);
yy[i] = -1 * scale * (13.0f * cosf(theta) - 5.0f * cosf(2.0f * theta) - 2 * cosf(3.0f * theta) - cosf(4.0f * theta));
}
|
27ab35ce4ffa91254cd23809327a86355aeca96b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "sigmoid_cross_entropy_loss_op.h"
namespace caffe2 {
namespace {
__global__ void ElementwiseMaxKernel(const int n, float* data, const float a) {
CUDA_1D_KERNEL_LOOP(index, n) {
data[index] = (data[index] > a) ? data[index] : a;
}
}
__global__ void SigmoidCrossEntropyLossKernel(
const int n,
const float* logits,
const int* targets,
float* losses,
float* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] == -1) {
losses[index] = 0.;
counts[index] = 0.;
} else {
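// Numerically stable binary cross-entropy with logits: equivalent to
// -[t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))], arranged so expf() only ever
// sees a non-positive argument.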
losses[index] =
-1. * logits[index] * (targets[index] - (logits[index] >= 0)) +
logf(
1 +
expf(logits[index] - 2 * logits[index] * (logits[index] >= 0)));
counts[index] = 1.;
}
}
}
__global__ void SigmoidCrossEntropyLossGradientKernel(
const int n,
const float* logits,
const int* targets,
float* d_logits,
float* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] == -1) {
d_logits[index] = 0.;
counts[index] = 0.;
} else {
d_logits[index] = 1. / (1. + expf(-logits[index])) - targets[index];
counts[index] = 1.;
}
}
}
} // namespace
template <>
bool SigmoidCrossEntropyLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& T = Input(1);
auto* avg_loss = Output(0);
CAFFE_ENFORCE(
X.size() == T.size(),
"Logit and target must have the same size",
"(",
X.size(),
" vs. ",
T.size(),
")");
avg_loss->Resize(vector<TIndex>());
counts_.ResizeLike(X);
losses_.ResizeLike(X);
normalizer_.Resize(vector<TIndex>());
hipLaunchKernelGGL(( SigmoidCrossEntropyLossKernel),
dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.size(),
X.data<float>(),
T.data<int>(),
losses_.mutable_data<float>(),
counts_.mutable_data<float>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
if (normalize_) {
float* normalizer_data = normalizer_.mutable_data<float>();
math::Sum<float, CUDAContext>(
counts_.size(), counts_.data<float>(), normalizer_data, &context_);
// Prevent division by zero if all counts are zero
hipLaunchKernelGGL(( ElementwiseMaxKernel),
dim3(CAFFE_GET_BLOCKS(normalizer_.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), normalizer_.size(), normalizer_data, 1e-5);
math::Div<float, CUDAContext>(
1, avg_loss_data, normalizer_data, avg_loss_data, &context_);
}
math::Scale<float, float, CUDAContext>(
1, scale_, avg_loss_data, avg_loss_data, &context_);
return true;
}
template <>
bool SigmoidCrossEntropyLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& T = Input(1);
auto& d_avg_loss = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
counts_.ResizeLike(X);
normalizer_.Resize(vector<TIndex>());
hipLaunchKernelGGL(( SigmoidCrossEntropyLossGradientKernel),
dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.size(),
X.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
counts_.mutable_data<float>());
if (normalize_) {
float* normalizer_data = normalizer_.mutable_data<float>();
math::Sum<float, CUDAContext>(
counts_.size(), counts_.data<float>(), normalizer_data, &context_);
// Prevent division by zero if all counts are zero
hipLaunchKernelGGL(( ElementwiseMaxKernel),
dim3(CAFFE_GET_BLOCKS(normalizer_.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), normalizer_.size(), normalizer_data, 1e-5);
math::Div<float, CUDAContext>(
1,
d_avg_loss.data<float>(),
normalizer_data,
normalizer_data,
&context_);
math::Scale<float, float, CUDAContext>(
1, scale_, normalizer_data, normalizer_data, &context_);
math::Scale<float, float, CUDAContext>(
dX->size(),
normalizer_data,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
} else {
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
math::Scale<float, float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(
SigmoidCrossEntropyLoss,
SigmoidCrossEntropyLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SigmoidCrossEntropyLossGradient,
SigmoidCrossEntropyLossGradientOp<float, CUDAContext>);
} // namespace caffe2
|
27ab35ce4ffa91254cd23809327a86355aeca96b.cu
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "sigmoid_cross_entropy_loss_op.h"
namespace caffe2 {
namespace {
__global__ void ElementwiseMaxKernel(const int n, float* data, const float a) {
CUDA_1D_KERNEL_LOOP(index, n) {
data[index] = (data[index] > a) ? data[index] : a;
}
}
__global__ void SigmoidCrossEntropyLossKernel(
const int n,
const float* logits,
const int* targets,
float* losses,
float* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] == -1) {
losses[index] = 0.;
counts[index] = 0.;
} else {
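// Numerically stable binary cross-entropy with logits: equivalent to
// -[t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))], arranged so expf() only ever
// sees a non-positive argument.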
losses[index] =
-1. * logits[index] * (targets[index] - (logits[index] >= 0)) +
logf(
1 +
expf(logits[index] - 2 * logits[index] * (logits[index] >= 0)));
counts[index] = 1.;
}
}
}
__global__ void SigmoidCrossEntropyLossGradientKernel(
const int n,
const float* logits,
const int* targets,
float* d_logits,
float* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] == -1) {
d_logits[index] = 0.;
counts[index] = 0.;
} else {
d_logits[index] = 1. / (1. + expf(-logits[index])) - targets[index];
counts[index] = 1.;
}
}
}
} // namespace
template <>
bool SigmoidCrossEntropyLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& T = Input(1);
auto* avg_loss = Output(0);
CAFFE_ENFORCE(
X.size() == T.size(),
"Logit and target must have the same size",
"(",
X.size(),
" vs. ",
T.size(),
")");
avg_loss->Resize(vector<TIndex>());
counts_.ResizeLike(X);
losses_.ResizeLike(X);
normalizer_.Resize(vector<TIndex>());
SigmoidCrossEntropyLossKernel<<<
CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.size(),
X.data<float>(),
T.data<int>(),
losses_.mutable_data<float>(),
counts_.mutable_data<float>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
if (normalize_) {
float* normalizer_data = normalizer_.mutable_data<float>();
math::Sum<float, CUDAContext>(
counts_.size(), counts_.data<float>(), normalizer_data, &context_);
// Prevent division by zero if all counts are zero
ElementwiseMaxKernel<<<
CAFFE_GET_BLOCKS(normalizer_.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(normalizer_.size(), normalizer_data, 1e-5);
math::Div<float, CUDAContext>(
1, avg_loss_data, normalizer_data, avg_loss_data, &context_);
}
math::Scale<float, float, CUDAContext>(
1, scale_, avg_loss_data, avg_loss_data, &context_);
return true;
}
template <>
bool SigmoidCrossEntropyLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& T = Input(1);
auto& d_avg_loss = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
counts_.ResizeLike(X);
normalizer_.Resize(vector<TIndex>());
SigmoidCrossEntropyLossGradientKernel<<<
CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.size(),
X.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
counts_.mutable_data<float>());
if (normalize_) {
float* normalizer_data = normalizer_.mutable_data<float>();
math::Sum<float, CUDAContext>(
counts_.size(), counts_.data<float>(), normalizer_data, &context_);
// Prevent division by zero if all counts are zero
ElementwiseMaxKernel<<<
CAFFE_GET_BLOCKS(normalizer_.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(normalizer_.size(), normalizer_data, 1e-5);
math::Div<float, CUDAContext>(
1,
d_avg_loss.data<float>(),
normalizer_data,
normalizer_data,
&context_);
math::Scale<float, float, CUDAContext>(
1, scale_, normalizer_data, normalizer_data, &context_);
math::Scale<float, float, CUDAContext>(
dX->size(),
normalizer_data,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
} else {
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
math::Scale<float, float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(
SigmoidCrossEntropyLoss,
SigmoidCrossEntropyLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SigmoidCrossEntropyLossGradient,
SigmoidCrossEntropyLossGradientOp<float, CUDAContext>);
} // namespace caffe2
|
e4422f6b478dd36d444e41f7f5916089f9a9b1d2.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <numeric>
namespace cudf {
// Trivially copy all members but the children
column_device_view::column_device_view(column_view source)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
}
// Free device memory allocated for children
void column_device_view::destroy() { delete this; }
namespace {
// helper function for column_device_view::create and mutable_column_device_view::create methods
template <typename ColumnView, typename ColumnDeviceView>
std::unique_ptr<ColumnDeviceView, std::function<void(ColumnDeviceView*)>>
create_device_view_from_view(ColumnView const& source, rmm::cuda_stream_view stream)
{
size_type num_children = source.num_children();
// First calculate the size of memory needed to hold the child columns. This is done by calling
// extent() for each of the children.
auto get_extent = cudf::detail::make_counting_transform_iterator(
0, [&source](auto i) { return ColumnDeviceView::extent(source.child(i)); });
// pad the allocation for aligning the first pointer
auto const descendant_storage_bytes = std::accumulate(
get_extent, get_extent + num_children, std::size_t{alignof(ColumnDeviceView) - 1});
// A buffer of CPU memory is allocated to hold the ColumnDeviceView
// objects. Once filled, the CPU memory is copied to device memory
// and then set into the d_children member pointer.
std::vector<char> staging_buffer(descendant_storage_bytes);
// Each ColumnDeviceView instance may have child objects that
// require setting some internal device pointers before being copied
// from CPU to device.
rmm::device_buffer* const descendant_storage =
new rmm::device_buffer(descendant_storage_bytes, stream);
auto deleter = [descendant_storage](ColumnDeviceView* v) {
v->destroy();
delete descendant_storage;
};
std::unique_ptr<ColumnDeviceView, decltype(deleter)> result{
new ColumnDeviceView(source, staging_buffer.data(), descendant_storage->data()), deleter};
// copy the CPU memory with all the children into device memory
CUDA_TRY(hipMemcpyAsync(descendant_storage->data(),
staging_buffer.data(),
descendant_storage->size(),
hipMemcpyDefault,
stream.value()));
stream.synchronize();
return result;
}
} // namespace
// Place any child objects in host memory (h_ptr) and use the device
// memory ptr (d_ptr) to set any child object pointers.
column_device_view::column_device_view(column_view source, void* h_ptr, void* d_ptr)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
d_children = detail::child_columns_to_device_array<column_device_view>(
source.child_begin(), source.child_end(), h_ptr, d_ptr);
}
// Construct a unique_ptr that invokes `destroy()` as its deleter
std::unique_ptr<column_device_view, std::function<void(column_device_view*)>>
column_device_view::create(column_view source, rmm::cuda_stream_view stream)
{
size_type num_children = source.num_children();
if (num_children == 0) {
// Can't use make_unique since the ctor is protected
return std::unique_ptr<column_device_view>(new column_device_view(source));
}
return create_device_view_from_view<column_view, column_device_view>(source, stream);
}
std::size_t column_device_view::extent(column_view const& source)
{
auto get_extent = thrust::make_transform_iterator(
thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); });
return std::accumulate(
get_extent, get_extent + source.num_children(), sizeof(column_device_view));
}
// For use with inplace-new to pre-fill memory to be copied to device
mutable_column_device_view::mutable_column_device_view(mutable_column_view source)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
}
mutable_column_device_view::mutable_column_device_view(mutable_column_view source,
void* h_ptr,
void* d_ptr)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
d_children = detail::child_columns_to_device_array<mutable_column_device_view>(
source.child_begin(), source.child_end(), h_ptr, d_ptr);
}
// Handle freeing children
void mutable_column_device_view::destroy() { delete this; }
// Construct a unique_ptr that invokes `destroy()` as its deleter
std::unique_ptr<mutable_column_device_view, std::function<void(mutable_column_device_view*)>>
mutable_column_device_view::create(mutable_column_view source, rmm::cuda_stream_view stream)
{
return source.num_children() == 0
? std::unique_ptr<mutable_column_device_view>(new mutable_column_device_view(source))
: create_device_view_from_view<mutable_column_view, mutable_column_device_view>(source,
stream);
}
std::size_t mutable_column_device_view::extent(mutable_column_view source)
{
auto get_extent = thrust::make_transform_iterator(
thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); });
return std::accumulate(
get_extent, get_extent + source.num_children(), sizeof(mutable_column_device_view));
}
} // namespace cudf
|
e4422f6b478dd36d444e41f7f5916089f9a9b1d2.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <numeric>
namespace cudf {
// Trivially copy all members but the children
column_device_view::column_device_view(column_view source)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
}
// Free device memory allocated for children
void column_device_view::destroy() { delete this; }
namespace {
// helper function for column_device_view::create and mutable_column_device_view::create methods
template <typename ColumnView, typename ColumnDeviceView>
std::unique_ptr<ColumnDeviceView, std::function<void(ColumnDeviceView*)>>
create_device_view_from_view(ColumnView const& source, rmm::cuda_stream_view stream)
{
size_type num_children = source.num_children();
// First calculate the size of memory needed to hold the child columns. This is done by calling
// extent() for each of the children.
auto get_extent = cudf::detail::make_counting_transform_iterator(
0, [&source](auto i) { return ColumnDeviceView::extent(source.child(i)); });
// pad the allocation for aligning the first pointer
auto const descendant_storage_bytes = std::accumulate(
get_extent, get_extent + num_children, std::size_t{alignof(ColumnDeviceView) - 1});
// A buffer of CPU memory is allocated to hold the ColumnDeviceView
// objects. Once filled, the CPU memory is copied to device memory
// and then set into the d_children member pointer.
std::vector<char> staging_buffer(descendant_storage_bytes);
// Each ColumnDeviceView instance may have child objects that
// require setting some internal device pointers before being copied
// from CPU to device.
rmm::device_buffer* const descendant_storage =
new rmm::device_buffer(descendant_storage_bytes, stream);
auto deleter = [descendant_storage](ColumnDeviceView* v) {
v->destroy();
delete descendant_storage;
};
std::unique_ptr<ColumnDeviceView, decltype(deleter)> result{
new ColumnDeviceView(source, staging_buffer.data(), descendant_storage->data()), deleter};
// copy the CPU memory with all the children into device memory
CUDA_TRY(cudaMemcpyAsync(descendant_storage->data(),
staging_buffer.data(),
descendant_storage->size(),
cudaMemcpyDefault,
stream.value()));
stream.synchronize();
return result;
}
} // namespace
// Place any child objects in host memory (h_ptr) and use the device
// memory ptr (d_ptr) to set any child object pointers.
column_device_view::column_device_view(column_view source, void* h_ptr, void* d_ptr)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
d_children = detail::child_columns_to_device_array<column_device_view>(
source.child_begin(), source.child_end(), h_ptr, d_ptr);
}
// Construct a unique_ptr that invokes `destroy()` as its deleter
std::unique_ptr<column_device_view, std::function<void(column_device_view*)>>
column_device_view::create(column_view source, rmm::cuda_stream_view stream)
{
size_type num_children = source.num_children();
if (num_children == 0) {
// Can't use make_unique since the ctor is protected
return std::unique_ptr<column_device_view>(new column_device_view(source));
}
return create_device_view_from_view<column_view, column_device_view>(source, stream);
}
std::size_t column_device_view::extent(column_view const& source)
{
auto get_extent = thrust::make_transform_iterator(
thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); });
return std::accumulate(
get_extent, get_extent + source.num_children(), sizeof(column_device_view));
}
// For use with inplace-new to pre-fill memory to be copied to device
mutable_column_device_view::mutable_column_device_view(mutable_column_view source)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
}
mutable_column_device_view::mutable_column_device_view(mutable_column_view source,
void* h_ptr,
void* d_ptr)
: detail::column_device_view_base{source.type(),
source.size(),
source.head(),
source.null_mask(),
source.offset()},
_num_children{source.num_children()}
{
d_children = detail::child_columns_to_device_array<mutable_column_device_view>(
source.child_begin(), source.child_end(), h_ptr, d_ptr);
}
// Handle freeing children
void mutable_column_device_view::destroy() { delete this; }
// Construct a unique_ptr that invokes `destroy()` as its deleter
std::unique_ptr<mutable_column_device_view, std::function<void(mutable_column_device_view*)>>
mutable_column_device_view::create(mutable_column_view source, rmm::cuda_stream_view stream)
{
return source.num_children() == 0
? std::unique_ptr<mutable_column_device_view>(new mutable_column_device_view(source))
: create_device_view_from_view<mutable_column_view, mutable_column_device_view>(source,
stream);
}
std::size_t mutable_column_device_view::extent(mutable_column_view source)
{
auto get_extent = thrust::make_transform_iterator(
thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); });
return std::accumulate(
get_extent, get_extent + source.num_children(), sizeof(mutable_column_device_view));
}
} // namespace cudf
|
4937d5089a7c2265fadec574059a0205034d5e56.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of WarpReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <cub/warp/warp_reduce.cuh>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
/**
* \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants)
*/
template<
typename OpT,
int LOGICAL_WARP_THREADS>
struct WrapperFunctor
{
OpT op;
int num_valid;
inline __host__ __device__ WrapperFunctor(OpT op, int num_valid) : op(op), num_valid(num_valid) {}
template <typename T>
inline __host__ __device__ T operator()(const T &a, const T &b) const
{
#if CUB_PTX_ARCH != 0
if ((cub::LaneId() % LOGICAL_WARP_THREADS) >= num_valid)
cub::ThreadTrap();
#endif
return op(a, b);
}
};
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Generic reduction
*/
template <
typename T,
typename ReductionOp,
typename WarpReduce,
bool PRIMITIVE = Traits<T>::PRIMITIVE>
struct DeviceTest
{
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).Reduce(data, reduction_op);
}
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
ReductionOp &reduction_op,
const int &valid_warp_threads)
{
return WarpReduce(temp_storage).Reduce(data, reduction_op, valid_warp_threads);
}
template <typename FlagT>
static __device__ __forceinline__ T HeadSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).HeadSegmentedReduce(data, flag, reduction_op);
}
template <typename FlagT>
static __device__ __forceinline__ T TailSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).TailSegmentedReduce(data, flag, reduction_op);
}
};
/**
* Summation
*/
template <
typename T,
typename WarpReduce>
struct DeviceTest<T, Sum, WarpReduce, true>
{
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
Sum &reduction_op)
{
return WarpReduce(temp_storage).Sum(data);
}
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
Sum &reduction_op,
const int &valid_warp_threads)
{
return WarpReduce(temp_storage).Sum(data, valid_warp_threads);
}
template <typename FlagT>
static __device__ __forceinline__ T HeadSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
Sum &reduction_op)
{
return WarpReduce(temp_storage).HeadSegmentedSum(data, flag);
}
template <typename FlagT>
static __device__ __forceinline__ T TailSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
Sum &reduction_op)
{
return WarpReduce(temp_storage).TailSegmentedSum(data, flag);
}
};
/**
* Full-tile warp reduction kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
__global__ void FullWarpReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type (1 warp)
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::Reduce(
temp_storage[warp_id], input, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ?
output :
input;
}
/**
* Partially-full warp reduction kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
__global__ void PartialWarpReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed,
int valid_warp_threads)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test partial-warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::Reduce(
temp_storage[warp_id], input, reduction_op, valid_warp_threads);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ?
output :
input;
}
/**
* Head-based segmented warp reduction test kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename FlagT,
typename ReductionOp>
__global__ void WarpHeadSegmentedReduceKernel(
T *d_in,
FlagT *d_head_flags,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
FlagT head_flag = d_head_flags[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test segmented warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::HeadSegmentedReduce(
temp_storage[warp_id], input, head_flag, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ?
output :
input;
}
/**
* Tail-based segmented warp reduction test kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename FlagT,
typename ReductionOp>
__global__ void WarpTailSegmentedReduceKernel(
T *d_in,
FlagT *d_tail_flags,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
FlagT tail_flag = d_tail_flags[threadIdx.x];
FlagT head_flag = (threadIdx.x == 0) ?
0 :
d_tail_flags[threadIdx.x - 1];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test segmented warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::TailSegmentedReduce(
temp_storage[warp_id], input, tail_flag, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ?
output :
input;
}
//---------------------------------------------------------------------
// Host utility subroutines
//---------------------------------------------------------------------
/**
* Initialize reduction problem (and solution)
*/
template <
typename T,
typename ReductionOp>
void Initialize(
GenMode gen_mode,
int flag_entropy,
T *h_in,
int *h_flags,
int warps,
int warp_threads,
int valid_warp_threads,
ReductionOp reduction_op,
T *h_head_out,
T *h_tail_out)
{
for (int i = 0; i < warps * warp_threads; ++i)
{
// Sample a value for this item
InitValue(gen_mode, h_in[i], i);
h_head_out[i] = h_in[i];
h_tail_out[i] = h_in[i];
// Sample whether or not this item will be a segment head
char bits;
RandomBits(bits, flag_entropy);
h_flags[i] = bits & 0x1;
}
// Accumulate segments (lane 0 of each warp is implicitly a segment head)
for (int warp = 0; warp < warps; ++warp)
{
int warp_offset = warp * warp_threads;
int item_offset = warp_offset + valid_warp_threads - 1;
// Last item in warp
T head_aggregate = h_in[item_offset];
T tail_aggregate = h_in[item_offset];
if (h_flags[item_offset])
h_head_out[item_offset] = head_aggregate;
item_offset--;
// Work backwards
while (item_offset >= warp_offset)
{
if (h_flags[item_offset + 1])
{
head_aggregate = h_in[item_offset];
}
else
{
head_aggregate = reduction_op(head_aggregate, h_in[item_offset]);
}
if (h_flags[item_offset])
{
h_head_out[item_offset] = head_aggregate;
h_tail_out[item_offset + 1] = tail_aggregate;
tail_aggregate = h_in[item_offset];
}
else
{
tail_aggregate = reduction_op(tail_aggregate, h_in[item_offset]);
}
item_offset--;
}
// Record the remaining head and tail aggregates at the warp offset (lane 0 is an implicit segment head)
h_head_out[warp_offset] = head_aggregate;
h_tail_out[warp_offset] = tail_aggregate;
}
}
/**
* Test warp reduction
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void TestReduce(
GenMode gen_mode,
ReductionOp reduction_op,
int valid_warp_threads = LOGICAL_WARP_THREADS)
{
const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS;
// Allocate host arrays
T *h_in = new T[BLOCK_THREADS];
int *h_flags = new int[BLOCK_THREADS];
T *h_out = new T[BLOCK_THREADS];
T *h_tail_out = new T[BLOCK_THREADS];
// Initialize problem
Initialize(gen_mode, -1, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, valid_warp_threads, reduction_op, h_out, h_tail_out);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * BLOCK_THREADS));
if (g_verbose)
{
printf("Data:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), valid_warp_threads);
}
// Run kernel
printf("\nGen-mode %d, %d warps, %d warp threads, %d valid lanes, %s (%d bytes) elements:\n",
gen_mode,
WARPS,
LOGICAL_WARP_THREADS,
valid_warp_threads,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
if (valid_warp_threads == LOGICAL_WARP_THREADS)
{
// Run full-warp kernel
hipLaunchKernelGGL(( FullWarpReduceKernel<WARPS, LOGICAL_WARP_THREADS>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out,
reduction_op,
d_elapsed);
}
else
{
// Run partial-warp kernel
hipLaunchKernelGGL(( PartialWarpReduceKernel<WARPS, LOGICAL_WARP_THREADS>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out,
reduction_op,
d_elapsed,
valid_warp_threads);
}
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_out, d_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (h_flags) delete[] h_flags;
if (h_out) delete[] h_out;
if (h_tail_out) delete[] h_tail_out;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test warp segmented reduction
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void TestSegmentedReduce(
GenMode gen_mode,
int flag_entropy,
ReductionOp reduction_op)
{
const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS;
// Allocate host arrays
int compare;
T *h_in = new T[BLOCK_THREADS];
int *h_flags = new int[BLOCK_THREADS];
T *h_head_out = new T[BLOCK_THREADS];
T *h_tail_out = new T[BLOCK_THREADS];
// Initialize problem
Initialize(gen_mode, flag_entropy, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, LOGICAL_WARP_THREADS, reduction_op, h_head_out, h_tail_out);
// Initialize/clear device arrays
T *d_in = NULL;
int *d_flags = NULL;
T *d_head_out = NULL;
T *d_tail_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(int) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_head_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_tail_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, hipMemcpyHostToDevice));
CubDebugExit(hipMemcpy(d_flags, h_flags, sizeof(int) * BLOCK_THREADS, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_head_out, 0, sizeof(T) * BLOCK_THREADS));
CubDebugExit(hipMemset(d_tail_out, 0, sizeof(T) * BLOCK_THREADS));
if (g_verbose)
{
printf("Data:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS);
printf("\nFlags:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_flags + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS);
}
printf("\nGen-mode %d, head flag entropy reduction %d, %d warps, %d warp threads, %s (%d bytes) elements:\n",
gen_mode,
flag_entropy,
WARPS,
LOGICAL_WARP_THREADS,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
// Run head-based kernel
hipLaunchKernelGGL(( WarpHeadSegmentedReduceKernel<WARPS, LOGICAL_WARP_THREADS>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_flags,
d_head_out,
reduction_op,
d_elapsed);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tHead-based segmented reduction results: ");
compare = CompareDeviceResults(h_head_out, d_head_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Run tail-based kernel
hipLaunchKernelGGL(( WarpTailSegmentedReduceKernel<WARPS, LOGICAL_WARP_THREADS>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_flags,
d_tail_out,
reduction_op,
d_elapsed);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tTail-based segmented reduction results: ");
compare = CompareDeviceResults(h_tail_out, d_tail_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (h_flags) delete[] h_flags;
if (h_head_out) delete[] h_head_out;
if (h_tail_out) delete[] h_tail_out;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_head_out) CubDebugExit(g_allocator.DeviceFree(d_head_out));
if (d_tail_out) CubDebugExit(g_allocator.DeviceFree(d_tail_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Run battery of tests for different full and partial tile sizes
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void Test(
GenMode gen_mode,
ReductionOp reduction_op)
{
// Partial tiles
for (
int valid_warp_threads = 1;
valid_warp_threads < LOGICAL_WARP_THREADS;
valid_warp_threads += CUB_MAX(1, LOGICAL_WARP_THREADS / 5))
{
// Without wrapper (to test non-excepting PTX POD-op specializations)
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, reduction_op, valid_warp_threads);
// With wrapper to ensure no ops called on OOB lanes
WrapperFunctor<ReductionOp, LOGICAL_WARP_THREADS> wrapped_op(reduction_op, valid_warp_threads);
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, wrapped_op, valid_warp_threads);
}
// Full tile
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, reduction_op, LOGICAL_WARP_THREADS);
// Segmented reduction with different head flags
for (int flag_entropy = 0; flag_entropy < 10; ++flag_entropy)
{
TestSegmentedReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, flag_entropy, reduction_op);
}
}
/**
* Run battery of tests for different data types and reduce ops
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS>
void Test(GenMode gen_mode)
{
// primitive
Test<WARPS, LOGICAL_WARP_THREADS, char>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, short>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, int>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, long long>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned char>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned short>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned int>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned long long>( gen_mode, Sum());
if (gen_mode != RANDOM)
{
Test<WARPS, LOGICAL_WARP_THREADS, float>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, double>( gen_mode, Sum());
}
// primitive (alternative reduce op)
Test<WARPS, LOGICAL_WARP_THREADS, unsigned char>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned short>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned int>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned long long>( gen_mode, Max());
// vec-1
Test<WARPS, LOGICAL_WARP_THREADS, uchar1>( gen_mode, Sum());
// vec-2
Test<WARPS, LOGICAL_WARP_THREADS, uchar2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ushort2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, uint2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ulonglong2>( gen_mode, Sum());
// vec-4
Test<WARPS, LOGICAL_WARP_THREADS, uchar4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ushort4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, uint4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ulonglong4>( gen_mode, Sum());
// complex
Test<WARPS, LOGICAL_WARP_THREADS, TestFoo>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, TestBar>( gen_mode, Sum());
}
/**
* Run battery of tests for different problem generation options
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS>
void Test()
{
Test<WARPS, LOGICAL_WARP_THREADS>(UNIFORM);
Test<WARPS, LOGICAL_WARP_THREADS>(INTEGER_SEED);
Test<WARPS, LOGICAL_WARP_THREADS>(RANDOM);
}
/**
* Run battery of tests for different number of active warps
*/
template <int LOGICAL_WARP_THREADS>
void Test()
{
Test<1, LOGICAL_WARP_THREADS>();
Test<2, LOGICAL_WARP_THREADS>();
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
#ifdef QUICK_TEST
// Compile/run quick tests
TestReduce<1, 32, int>(UNIFORM, Sum());
TestReduce<1, 32, double>(UNIFORM, Sum());
TestReduce<2, 16, TestBar>(UNIFORM, Sum());
TestSegmentedReduce<1, 32, int>(UNIFORM, 1, Sum());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test logical warp sizes
Test<32>();
Test<16>();
Test<9>();
Test<7>();
}
#endif
return 0;
}
|
4937d5089a7c2265fadec574059a0205034d5e56.cu
|
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of WarpReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <cub/warp/warp_reduce.cuh>
#include <cub/util_allocator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
/**
* \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants)
*/
template<
typename OpT,
int LOGICAL_WARP_THREADS>
struct WrapperFunctor
{
OpT op;
int num_valid;
inline __host__ __device__ WrapperFunctor(OpT op, int num_valid) : op(op), num_valid(num_valid) {}
template <typename T>
inline __host__ __device__ T operator()(const T &a, const T &b) const
{
#if CUB_PTX_ARCH != 0
if ((cub::LaneId() % LOGICAL_WARP_THREADS) >= num_valid)
cub::ThreadTrap();
#endif
return op(a, b);
}
};
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Generic reduction
*/
template <
typename T,
typename ReductionOp,
typename WarpReduce,
bool PRIMITIVE = Traits<T>::PRIMITIVE>
struct DeviceTest
{
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).Reduce(data, reduction_op);
}
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
ReductionOp &reduction_op,
const int &valid_warp_threads)
{
return WarpReduce(temp_storage).Reduce(data, reduction_op, valid_warp_threads);
}
template <typename FlagT>
static __device__ __forceinline__ T HeadSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).HeadSegmentedReduce(data, flag, reduction_op);
}
template <typename FlagT>
static __device__ __forceinline__ T TailSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
ReductionOp &reduction_op)
{
return WarpReduce(temp_storage).TailSegmentedReduce(data, flag, reduction_op);
}
};
/**
* Summation
*/
template <
typename T,
typename WarpReduce>
struct DeviceTest<T, Sum, WarpReduce, true>
{
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
Sum &reduction_op)
{
return WarpReduce(temp_storage).Sum(data);
}
static __device__ __forceinline__ T Reduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
Sum &reduction_op,
const int &valid_warp_threads)
{
return WarpReduce(temp_storage).Sum(data, valid_warp_threads);
}
template <typename FlagT>
static __device__ __forceinline__ T HeadSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
Sum &reduction_op)
{
return WarpReduce(temp_storage).HeadSegmentedSum(data, flag);
}
template <typename FlagT>
static __device__ __forceinline__ T TailSegmentedReduce(
typename WarpReduce::TempStorage &temp_storage,
T &data,
FlagT &flag,
Sum &reduction_op)
{
return WarpReduce(temp_storage).TailSegmentedSum(data, flag);
}
};
/**
* Full-tile warp reduction kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
__global__ void FullWarpReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type (1 warp)
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::Reduce(
temp_storage[warp_id], input, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ?
output :
input;
}
/**
* Partially-full warp reduction kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
__global__ void PartialWarpReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed,
int valid_warp_threads)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test partial-warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::Reduce(
temp_storage[warp_id], input, reduction_op, valid_warp_threads);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = (threadIdx.x % LOGICAL_WARP_THREADS == 0) ?
output :
input;
}
/**
* Head-based segmented warp reduction test kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename FlagT,
typename ReductionOp>
__global__ void WarpHeadSegmentedReduceKernel(
T *d_in,
FlagT *d_head_flags,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
FlagT head_flag = d_head_flags[threadIdx.x];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test segmented warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::HeadSegmentedReduce(
temp_storage[warp_id], input, head_flag, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ?
output :
input;
}
/**
* Tail-based segmented warp reduction test kernel
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename FlagT,
typename ReductionOp>
__global__ void WarpTailSegmentedReduceKernel(
T *d_in,
FlagT *d_tail_flags,
T *d_out,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative warp-reduce utility type
typedef WarpReduce<T, LOGICAL_WARP_THREADS> WarpReduce;
// Allocate temp storage in shared memory
__shared__ typename WarpReduce::TempStorage temp_storage[WARPS];
// Per-thread tile data
T input = d_in[threadIdx.x];
FlagT tail_flag = d_tail_flags[threadIdx.x];
FlagT head_flag = (threadIdx.x == 0) ?
0 :
d_tail_flags[threadIdx.x - 1];
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t start = clock();
__threadfence_block(); // workaround to prevent clock hoisting
// Test segmented warp reduce
int warp_id = threadIdx.x / LOGICAL_WARP_THREADS;
T output = DeviceTest<T, ReductionOp, WarpReduce>::TailSegmentedReduce(
temp_storage[warp_id], input, tail_flag, reduction_op);
// Record elapsed clocks
__threadfence_block(); // workaround to prevent clock hoisting
clock_t stop = clock();
__threadfence_block(); // workaround to prevent clock hoisting
*d_elapsed = stop - start;
// Store aggregate
d_out[threadIdx.x] = ((threadIdx.x % LOGICAL_WARP_THREADS == 0) || head_flag) ?
output :
input;
}
//---------------------------------------------------------------------
// Host utility subroutines
//---------------------------------------------------------------------
/**
* Initialize reduction problem (and solution)
*/
template <
typename T,
typename ReductionOp>
void Initialize(
GenMode gen_mode,
int flag_entropy,
T *h_in,
int *h_flags,
int warps,
int warp_threads,
int valid_warp_threads,
ReductionOp reduction_op,
T *h_head_out,
T *h_tail_out)
{
for (int i = 0; i < warps * warp_threads; ++i)
{
// Sample a value for this item
InitValue(gen_mode, h_in[i], i);
h_head_out[i] = h_in[i];
h_tail_out[i] = h_in[i];
// Sample whether or not this item will be a segment head
char bits;
RandomBits(bits, flag_entropy);
h_flags[i] = bits & 0x1;
}
// Accumulate segments (lane 0 of each warp is implicitly a segment head)
for (int warp = 0; warp < warps; ++warp)
{
int warp_offset = warp * warp_threads;
int item_offset = warp_offset + valid_warp_threads - 1;
// Last item in warp
T head_aggregate = h_in[item_offset];
T tail_aggregate = h_in[item_offset];
if (h_flags[item_offset])
h_head_out[item_offset] = head_aggregate;
item_offset--;
// Work backwards
while (item_offset >= warp_offset)
{
if (h_flags[item_offset + 1])
{
head_aggregate = h_in[item_offset];
}
else
{
head_aggregate = reduction_op(head_aggregate, h_in[item_offset]);
}
if (h_flags[item_offset])
{
h_head_out[item_offset] = head_aggregate;
h_tail_out[item_offset + 1] = tail_aggregate;
tail_aggregate = h_in[item_offset];
}
else
{
tail_aggregate = reduction_op(tail_aggregate, h_in[item_offset]);
}
item_offset--;
}
// Record the remaining head and tail aggregates at the warp offset (lane 0 is an implicit segment head)
h_head_out[warp_offset] = head_aggregate;
h_tail_out[warp_offset] = tail_aggregate;
}
}
/**
* Test warp reduction
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void TestReduce(
GenMode gen_mode,
ReductionOp reduction_op,
int valid_warp_threads = LOGICAL_WARP_THREADS)
{
const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS;
// Allocate host arrays
T *h_in = new T[BLOCK_THREADS];
int *h_flags = new int[BLOCK_THREADS];
T *h_out = new T[BLOCK_THREADS];
T *h_tail_out = new T[BLOCK_THREADS];
// Initialize problem
Initialize(gen_mode, -1, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, valid_warp_threads, reduction_op, h_out, h_tail_out);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * BLOCK_THREADS));
if (g_verbose)
{
printf("Data:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), valid_warp_threads);
}
// Run kernel
printf("\nGen-mode %d, %d warps, %d warp threads, %d valid lanes, %s (%d bytes) elements:\n",
gen_mode,
WARPS,
LOGICAL_WARP_THREADS,
valid_warp_threads,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
if (valid_warp_threads == LOGICAL_WARP_THREADS)
{
// Run full-warp kernel
FullWarpReduceKernel<WARPS, LOGICAL_WARP_THREADS><<<1, BLOCK_THREADS>>>(
d_in,
d_out,
reduction_op,
d_elapsed);
}
else
{
// Run partial-warp kernel
PartialWarpReduceKernel<WARPS, LOGICAL_WARP_THREADS><<<1, BLOCK_THREADS>>>(
d_in,
d_out,
reduction_op,
d_elapsed,
valid_warp_threads);
}
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_out, d_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (h_flags) delete[] h_flags;
if (h_out) delete[] h_out;
if (h_tail_out) delete[] h_tail_out;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test warp segmented reduction
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void TestSegmentedReduce(
GenMode gen_mode,
int flag_entropy,
ReductionOp reduction_op)
{
const int BLOCK_THREADS = LOGICAL_WARP_THREADS * WARPS;
// Allocate host arrays
int compare;
T *h_in = new T[BLOCK_THREADS];
int *h_flags = new int[BLOCK_THREADS];
T *h_head_out = new T[BLOCK_THREADS];
T *h_tail_out = new T[BLOCK_THREADS];
// Initialize problem
Initialize(gen_mode, flag_entropy, h_in, h_flags, WARPS, LOGICAL_WARP_THREADS, LOGICAL_WARP_THREADS, reduction_op, h_head_out, h_tail_out);
// Initialize/clear device arrays
T *d_in = NULL;
int *d_flags = NULL;
T *d_head_out = NULL;
T *d_tail_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(int) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_head_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_tail_out, sizeof(T) * BLOCK_THREADS));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * BLOCK_THREADS, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(int) * BLOCK_THREADS, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_head_out, 0, sizeof(T) * BLOCK_THREADS));
CubDebugExit(cudaMemset(d_tail_out, 0, sizeof(T) * BLOCK_THREADS));
if (g_verbose)
{
printf("Data:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_in + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS);
printf("\nFlags:\n");
for (int i = 0; i < WARPS; ++i)
DisplayResults(h_flags + (i * LOGICAL_WARP_THREADS), LOGICAL_WARP_THREADS);
}
printf("\nGen-mode %d, head flag entropy reduction %d, %d warps, %d warp threads, %s (%d bytes) elements:\n",
gen_mode,
flag_entropy,
WARPS,
LOGICAL_WARP_THREADS,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
// Run head-based kernel
WarpHeadSegmentedReduceKernel<WARPS, LOGICAL_WARP_THREADS><<<1, BLOCK_THREADS>>>(
d_in,
d_flags,
d_head_out,
reduction_op,
d_elapsed);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tHead-based segmented reduction results: ");
compare = CompareDeviceResults(h_head_out, d_head_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Run tail-based kernel
WarpTailSegmentedReduceKernel<WARPS, LOGICAL_WARP_THREADS><<<1, BLOCK_THREADS>>>(
d_in,
d_flags,
d_tail_out,
reduction_op,
d_elapsed);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tTail-based segmented reduction results: ");
compare = CompareDeviceResults(h_tail_out, d_tail_out, BLOCK_THREADS, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (h_flags) delete[] h_flags;
if (h_head_out) delete[] h_head_out;
if (h_tail_out) delete[] h_tail_out;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_head_out) CubDebugExit(g_allocator.DeviceFree(d_head_out));
if (d_tail_out) CubDebugExit(g_allocator.DeviceFree(d_tail_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Run battery of tests for different full and partial tile sizes
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS,
typename T,
typename ReductionOp>
void Test(
GenMode gen_mode,
ReductionOp reduction_op)
{
// Partial tiles
for (
int valid_warp_threads = 1;
valid_warp_threads < LOGICAL_WARP_THREADS;
valid_warp_threads += CUB_MAX(1, LOGICAL_WARP_THREADS / 5))
{
// Without wrapper (to test non-excepting PTX POD-op specializations)
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, reduction_op, valid_warp_threads);
// With wrapper to ensure no ops called on OOB lanes
WrapperFunctor<ReductionOp, LOGICAL_WARP_THREADS> wrapped_op(reduction_op, valid_warp_threads);
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, wrapped_op, valid_warp_threads);
}
// Full tile
TestReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, reduction_op, LOGICAL_WARP_THREADS);
// Segmented reduction with different head flags
for (int flag_entropy = 0; flag_entropy < 10; ++flag_entropy)
{
TestSegmentedReduce<WARPS, LOGICAL_WARP_THREADS, T>(gen_mode, flag_entropy, reduction_op);
}
}
/**
* Run battery of tests for different data types and reduce ops
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS>
void Test(GenMode gen_mode)
{
// primitive
Test<WARPS, LOGICAL_WARP_THREADS, char>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, short>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, int>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, long long>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned char>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned short>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned int>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned long long>( gen_mode, Sum());
if (gen_mode != RANDOM)
{
Test<WARPS, LOGICAL_WARP_THREADS, float>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, double>( gen_mode, Sum());
}
// primitive (alternative reduce op)
Test<WARPS, LOGICAL_WARP_THREADS, unsigned char>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned short>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned int>( gen_mode, Max());
Test<WARPS, LOGICAL_WARP_THREADS, unsigned long long>( gen_mode, Max());
// vec-1
Test<WARPS, LOGICAL_WARP_THREADS, uchar1>( gen_mode, Sum());
// vec-2
Test<WARPS, LOGICAL_WARP_THREADS, uchar2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ushort2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, uint2>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ulonglong2>( gen_mode, Sum());
// vec-4
Test<WARPS, LOGICAL_WARP_THREADS, uchar4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ushort4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, uint4>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, ulonglong4>( gen_mode, Sum());
// complex
Test<WARPS, LOGICAL_WARP_THREADS, TestFoo>( gen_mode, Sum());
Test<WARPS, LOGICAL_WARP_THREADS, TestBar>( gen_mode, Sum());
}
/**
* Run battery of tests for different problem generation options
*/
template <
int WARPS,
int LOGICAL_WARP_THREADS>
void Test()
{
Test<WARPS, LOGICAL_WARP_THREADS>(UNIFORM);
Test<WARPS, LOGICAL_WARP_THREADS>(INTEGER_SEED);
Test<WARPS, LOGICAL_WARP_THREADS>(RANDOM);
}
/**
* Run battery of tests for different number of active warps
*/
template <int LOGICAL_WARP_THREADS>
void Test()
{
Test<1, LOGICAL_WARP_THREADS>();
Test<2, LOGICAL_WARP_THREADS>();
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
#ifdef QUICK_TEST
// Compile/run quick tests
TestReduce<1, 32, int>(UNIFORM, Sum());
TestReduce<1, 32, double>(UNIFORM, Sum());
TestReduce<2, 16, TestBar>(UNIFORM, Sum());
TestSegmentedReduce<1, 32, int>(UNIFORM, 1, Sum());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test logical warp sizes
Test<32>();
Test<16>();
Test<9>();
Test<7>();
}
#endif
return 0;
}
|
a2cba37e2eed1fcf19adaa313f97ed17a3721c9d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
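//
// As a minimal sketch (assuming the same row-major indexing used by the
// kernels below, with i standing in for the flattened pixel index), the
// per-pixel split is just three scalar copies:
//
//   const uchar4 px = inputImageRGBA[i];   // AoS load of one RGBA pixel
//   redChannel[i]   = px.x;                // SoA stores, one array per channel
//   greenChannel[i] = px.y;
//   blueChannel[i]  = px.z;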
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
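//
// As a minimal sequential sketch of that weighted sum for one output pixel
// (a reference-style illustration assuming replicate clamping at the image
// borders; x, y, channel and output are stand-ins for the current pixel
// coordinates and a single color plane):
//
//   float result = 0.0f;
//   for (int fy = 0; fy < filterWidth; ++fy) {
//     for (int fx = 0; fx < filterWidth; ++fx) {
//       int ix = min(max(x + fx - filterWidth / 2, 0), numCols - 1);
//       int iy = min(max(y + fy - filterWidth / 2, 0), numRows - 1);
//       result += filter[fy * filterWidth + fx] * channel[iy * numCols + ix];
//     }
//   }
//   output[y * numCols + x] = (unsigned char) result;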
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float color = 0.0f;
int filter_center = filterWidth / 2;
int c_x, c_y;
for (int fy = 0; fy < filterWidth; fy++) {
for (int fx = 0; fx < filterWidth; fx++) {
c_x = thread_2D_pos.x + fx - filter_center;
c_y = thread_2D_pos.y + fy - filter_center;
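// Clamp the neighbor coordinates into the image (replicate-edge handling,
// as described in the clamping NOTE above)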
c_x = min(max(c_x, 0), numCols - 1);
c_y = min(max(c_y, 0), numRows - 1);
color += filter[fy * filterWidth + fx] * inputChannel[c_y * numCols + c_x];
}
}
outputChannel[thread_1D_pos] = static_cast<unsigned char>(color);
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
uchar4 tmp = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = tmp.x;
greenChannel[thread_1D_pos] = tmp.y;
blueChannel[thread_1D_pos] = tmp.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
size_t memSize = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(hipMalloc(&d_filter, memSize));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, memSize, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32,32,1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols/blockSize.x + 1, numRows/blockSize.y + 1, 1);
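// Note: numCols/blockSize.x + 1 rounds the grid up but launches one extra
// (empty) block whenever the dimension is already a multiple of the block size;
// the usual ceiling division, (numCols + blockSize.x - 1)/blockSize.x, avoids
// that. Either way, the bounds checks inside the kernels keep the result correct.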
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
|
a2cba37e2eed1fcf19adaa313f97ed17a3721c9d.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
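//
// As a minimal sketch (assuming the same row-major indexing used by the
// kernels below, with i standing in for the flattened pixel index), the
// per-pixel split is just three scalar copies:
//
//   const uchar4 px = inputImageRGBA[i];   // AoS load of one RGBA pixel
//   redChannel[i]   = px.x;                // SoA stores, one array per channel
//   greenChannel[i] = px.y;
//   blueChannel[i]  = px.z;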
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
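//
// As a minimal sequential sketch of that weighted sum for one output pixel
// (a reference-style illustration assuming replicate clamping at the image
// borders; x, y, channel and output are stand-ins for the current pixel
// coordinates and a single color plane):
//
//   float result = 0.0f;
//   for (int fy = 0; fy < filterWidth; ++fy) {
//     for (int fx = 0; fx < filterWidth; ++fx) {
//       int ix = min(max(x + fx - filterWidth / 2, 0), numCols - 1);
//       int iy = min(max(y + fy - filterWidth / 2, 0), numRows - 1);
//       result += filter[fy * filterWidth + fx] * channel[iy * numCols + ix];
//     }
//   }
//   output[y * numCols + x] = (unsigned char) result;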
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float color = 0.0f;
int filter_center = filterWidth / 2;
int c_x, c_y;
for (int fy = 0; fy < filterWidth; fy++) {
for (int fx = 0; fx < filterWidth; fx++) {
c_x = thread_2D_pos.x + fx - filter_center;
c_y = thread_2D_pos.y + fy - filter_center;
c_x = min(max(c_x, 0), numCols - 1);
c_y = min(max(c_y, 0), numRows - 1);
color += filter[fy * filterWidth + fx] * inputChannel[c_y * numCols + c_x];
}
}
outputChannel[thread_1D_pos] = color;
}
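//****************************************************************************
// The comments at the top of this file suggest a shared-memory version as a
// follow-up optimization. The kernel below is only a hedged sketch of that
// idea, not the assignment's reference solution. Each block stages the pixels
// it needs, plus a halo of filterWidth/2 pixels on every side, into
// dynamically allocated shared memory before filtering. It assumes the tile
// fits in shared memory and that the kernel is launched with the extra
// shared-memory argument, e.g.:
//
//   size_t smem = (blockSize.x + filterWidth - 1) *
//                 (blockSize.y + filterWidth - 1) * sizeof(unsigned char);
//   gaussian_blur_shared<<<gridSize, blockSize, smem>>>(d_red, d_redBlurred,
//                                                       numRows, numCols,
//                                                       d_filter, filterWidth);
//****************************************************************************
__global__
void gaussian_blur_shared(const unsigned char* const inputChannel,
                          unsigned char* const outputChannel,
                          int numRows, int numCols,
                          const float* const filter, const int filterWidth)
{
  extern __shared__ unsigned char tile[];

  const int halo   = filterWidth / 2;
  const int tile_w = blockDim.x + 2 * halo;
  const int tile_h = blockDim.y + 2 * halo;

  const int out_x = blockIdx.x * blockDim.x + threadIdx.x;
  const int out_y = blockIdx.y * blockDim.y + threadIdx.y;

  // Cooperative load: every thread copies one or more tile elements,
  // clamping reads that fall outside the image to the nearest valid pixel.
  for (int idx = threadIdx.y * blockDim.x + threadIdx.x;
       idx < tile_w * tile_h;
       idx += blockDim.x * blockDim.y) {
    int t_x = idx % tile_w;
    int t_y = idx / tile_w;
    int g_x = min(max((int)(blockIdx.x * blockDim.x) + t_x - halo, 0), numCols - 1);
    int g_y = min(max((int)(blockIdx.y * blockDim.y) + t_y - halo, 0), numRows - 1);
    tile[idx] = inputChannel[g_y * numCols + g_x];
  }
  __syncthreads();

  if (out_x >= numCols || out_y >= numRows)
    return;

  // The tile is offset by `halo`, so filter tap (fx, fy) for this pixel sits
  // at tile coordinate (threadIdx.x + fx, threadIdx.y + fy).
  float color = 0.0f;
  for (int fy = 0; fy < filterWidth; fy++) {
    for (int fx = 0; fx < filterWidth; fx++) {
      color += filter[fy * filterWidth + fx] *
               tile[(threadIdx.y + fy) * tile_w + (threadIdx.x + fx)];
    }
  }
  outputChannel[out_y * numCols + out_x] = color;
}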
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
uchar4 tmp = inputImageRGBA[thread_1D_pos];
redChannel[thread_1D_pos] = tmp.x;
greenChannel[thread_1D_pos] = tmp.y;
blueChannel[thread_1D_pos] = tmp.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
size_t memSize = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(cudaMalloc(&d_filter, memSize));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, memSize, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32,32,1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize(numCols/blockSize.x + 1, numRows/blockSize.y + 1, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
e0aecfbe931d25d861aa5f1ac0e90a1ac675d87a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <helper_functions.h>
#include <helper_math.h>
// includes, project
#include "2Dconvolution.h"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(int width, int height);
Matrix AllocateMatrix(int width, int height);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
void ConvolutionOnDeviceShared(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Înmulțirea fără memorie partajată
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P)
{
float Cvalue = 0;
//se calculeaza pozitia elementulu din matricea P
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
//se retine in Cvalue suma dupa formula din enunt
for(i=0;i<5;++i){
for(j=0;j<5;j++){
if((row+i-2>=0) && (col+j-2>=0) && (row+i-2<N.height) && (col+j-2<N.width) && (row<N.height) && (col < N.width))
Cvalue += M.elements[i*5+j]*N.elements[(row+i-2)*N.width+col+j-2];
}
}
//se adauga in P pe pozitia row*N.width+col valoarea sumei
P.elements[row * N.width + col] =(float)Cvalue;
}
////////////////////////////////////////////////////////////////////////////////
// Înmulțirea cu memorie partajată
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernelShared(Matrix M, Matrix N, Matrix P)
{
//TODO: calculul rezultatului convoluției
}
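////////////////////////////////////////////////////////////////////////////////
// Hedged sketch, not the original author's solution: one possible way to fill
// in the shared-memory TODO above. Each block stages the KERNEL_SIZE x
// KERNEL_SIZE kernel M and a (BLOCK_SIZE + KERNEL_SIZE - 1)^2 tile of N (with
// a zero-padded halo) into shared memory and convolves from there. It assumes
// BLOCK_SIZE and KERNEL_SIZE come from 2Dconvolution.h, as used elsewhere in
// this file.
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernelSharedSketch(Matrix M, Matrix N, Matrix P)
{
   __shared__ float sM[KERNEL_SIZE * KERNEL_SIZE];
   __shared__ float sN[BLOCK_SIZE + KERNEL_SIZE - 1][BLOCK_SIZE + KERNEL_SIZE - 1];

   const int radius = KERNEL_SIZE / 2;
   const int tile   = BLOCK_SIZE + KERNEL_SIZE - 1;
   const int lin    = threadIdx.y * blockDim.x + threadIdx.x;

   int row = blockIdx.y * blockDim.y + threadIdx.y;
   int col = blockIdx.x * blockDim.x + threadIdx.x;

   // load the filter once per block
   for (int i = lin; i < KERNEL_SIZE * KERNEL_SIZE; i += blockDim.x * blockDim.y)
      sM[i] = M.elements[i];

   // load the N tile, zero-padding reads that fall outside the image
   for (int i = lin; i < tile * tile; i += blockDim.x * blockDim.y) {
      int t_r = i / tile;
      int t_c = i % tile;
      int g_r = (int)(blockIdx.y * blockDim.y) + t_r - radius;
      int g_c = (int)(blockIdx.x * blockDim.x) + t_c - radius;
      sN[t_r][t_c] = (g_r >= 0 && g_r < N.height && g_c >= 0 && g_c < N.width)
                        ? N.elements[g_r * N.width + g_c] : 0.0f;
   }
   __syncthreads();

   if (row >= N.height || col >= N.width)
      return;

   float value = 0.0f;
   for (int i = 0; i < KERNEL_SIZE; i++)
      for (int j = 0; j < KERNEL_SIZE; j++)
         value += sM[i * KERNEL_SIZE + j] * sN[threadIdx.y + i][threadIdx.x + j];

   P.elements[row * N.width + col] = value;
}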
////////////////////////////////////////////////////////////////////////////////
// Returnează 1 dacă matricele sunt ~ egale
////////////////////////////////////////////////////////////////////////////////
int CompareMatrices(Matrix A, Matrix B)
{
int i;
if(A.width != B.width || A.height != B.height || A.pitch != B.pitch)
return 0;
int size = A.width * A.height;
for(i = 0; i < size; i++)
if(fabs(A.elements[i] - B.elements[i]) > MAX_ERR)
return 0;
return 1;
}
void GenerateRandomMatrix(Matrix m)
{
int i;
int size = m.width * m.height;
srand(time(NULL));
for(i = 0; i < size; i++)
m.elements[i] = rand() / (float)RAND_MAX;
}
////////////////////////////////////////////////////////////////////////////////
// main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int width = 0, height = 0;
FILE *f, *out, *grafic;
if(argc < 2)
{
printf("Argumente prea puine, trimitei id-ul testului care trebuie rulat\n");
return 0;
}
char name[100];
sprintf(name, "./tests/test_%s.txt", argv[1]);
f = fopen(name, "r");
out = fopen("out.txt", "a");
grafic = fopen("grafic.txt", "a");
fscanf(f, "%d%d", &width, &height);
fprintf(grafic,"%d ",width*height);
Matrix M;//kernel de pe host
Matrix N;//matrice inițială de pe host
Matrix P;//rezultat fără memorie partajată calculat pe GPU
Matrix PS;//rezultatul cu memorie partajată calculat pe GPU
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE);
N = AllocateMatrix(width, height);
P = AllocateMatrix(width, height);
PS = AllocateMatrix(width, height);
GenerateRandomMatrix(M);
GenerateRandomMatrix(N);
// M * N pe device
ConvolutionOnDevice(M, N, P);
// M * N pe device cu memorie partajată
ConvolutionOnDeviceShared(M, N, PS);
// calculează rezultatul pe CPU pentru comparație
Matrix reference = AllocateMatrix(P.width, P.height);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
fprintf(grafic,"%s","\n");
// verifică dacă rezultatul obținut pe device este cel așteptat
int res = CompareMatrices(reference, P);
printf("Test global %s\n", (1 == res) ? "PASSED" : "FAILED");
fprintf(out, "Test global %s %s\n", argv[1], (1 == res) ? "PASSED" : "FAILED");
// verifică dacă rezultatul obținut pe device cu memorie partajată este cel așteptat
// int ress = CompareMatrices(reference, PS);
int ress = CompareMatrices(reference, PS);
printf("Test shared %s\n", (1 == ress) ? "PASSED" : "FAILED");
fprintf(out, "Test shared %s %s\n", argv[1], (1 == ress) ? "PASSED" : "FAILED");
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
FreeMatrix(&PS);
fclose(f);
fclose(out);
fclose(grafic);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
/*am adaugat variabila FILE * grafic
ce reprezinta fisierul de output pentru timp*/
Matrix Md, Nd, Pd; //matricele corespunzătoare de pe device
FILE *grafic;
float timp;
grafic = fopen("grafic.txt", "a");
//pentru măsurarea timpului de execuție în kernel
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
//alocare matricele de pe device
Md = AllocateDeviceMatrix(M.width,M.height);
Nd = AllocateDeviceMatrix(N.width,N.height);
Pd = AllocateDeviceMatrix(N.width,N.height);
//copiere date de pe host (M, N) pe device (MD, Nd)
int size = M.width * M.height * sizeof(float);
hipMemcpy( Md.elements, M.elements, size, hipMemcpyHostToDevice);
size = N.width * N.height * sizeof(float);
hipMemcpy( Nd.elements, N.elements, size, hipMemcpyHostToDevice);
//setare configurație de rulare a kernelului
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((N.width + BLOCK_SIZE - 1) /dimBlock.x,(N.height + BLOCK_SIZE - 1) /dimBlock.y);
sdkStartTimer(&kernelTime);
//lansare în execuție a kernelului
hipLaunchKernelGGL(( ConvolutionKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd);
hipDeviceSynchronize();
sdkStopTimer(&kernelTime);
timp = sdkGetTimerValue(&kernelTime);
printf ("Timp execuie kernel: %f ms\n", timp);
fprintf(grafic,"%f ",timp);
//copiere rezultat pe host
size = N.width * N.height * sizeof(float);
hipMemcpy( P.elements, Pd.elements, size, hipMemcpyDeviceToHost);
//eliberarea memoriei matricelor de pe device
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
fclose(grafic);
}
//nu am realizat varianta cu memory shared
void ConvolutionOnDeviceShared(const Matrix M, const Matrix N, Matrix P)
{
//Matrix Md, Nd, Pd; //matricele corespunzătoare de pe device
FILE *grafic;
float timp;
grafic = fopen("grafic.txt", "a");
//pentru măsurarea timpului de execuție în kernel
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
//TODO: alocați matricele de pe device
//TODO: copiați datele de pe host (M, N) pe device (MD, Nd)
//TODO: setați configurația de rulare a kernelului
sdkStartTimer(&kernelTime);
//TODO: lansați în execuție kernelul
hipDeviceSynchronize();
sdkStopTimer(&kernelTime);
timp = sdkGetTimerValue(&kernelTime);
printf ("Timp execuie kernel cu memorie partajat: %f ms\n", timp);
fprintf(grafic,"%f ",timp);
//TODO: copiaţi rezultatul pe host
//TODO: eliberați memoria matricelor de pe device
fclose(grafic);
}
// Alocă o matrice de dimensiune height*width pe device
Matrix AllocateDeviceMatrix(int width, int height)
{
//TODO: alocați matricea și setați width, pitch și height
Matrix m;
m.width = width;
m.height = height;
m.pitch = width;
size_t size = width * height * sizeof(float);
hipMalloc((void**)&m.elements,size);
return m;
}
// Alocă matrice pe host de dimensiune height*width
Matrix AllocateMatrix(int width, int height)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = (float*) malloc(size*sizeof(float));
return M;
}
// Eliberează o matrice de pe device
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Eliberează o matrice de pe host
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
|
e0aecfbe931d25d861aa5f1ac0e90a1ac675d87a.cu
|
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <helper_functions.h>
#include <helper_math.h>
// includes, project
#include "2Dconvolution.h"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(int width, int height);
Matrix AllocateMatrix(int width, int height);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
void ConvolutionOnDeviceShared(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Înmulțirea fără memorie partajată
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P)
{
float Cvalue = 0;
//se calculeaza pozitia elementulu din matricea P
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
//se retine in Cvalue suma dupa formula din enunt
for(i=0;i<5;++i){
for(j=0;j<5;j++){
if((row+i-2>=0) && (col+j-2>=0) && (row+i-2<N.height) && (col+j-2<N.width) && (row<N.height) && (col < N.width))
Cvalue += M.elements[i*5+j]*N.elements[(row+i-2)*N.width+col+j-2];
}
}
//se adauga in P pe pozitia row*N.width+col valoarea sumei
P.elements[row * N.width + col] =(float)Cvalue;
}
////////////////////////////////////////////////////////////////////////////////
// Înmulțirea cu memorie partajată
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernelShared(Matrix M, Matrix N, Matrix P)
{
//TODO: calculul rezultatului convoluției
}
////////////////////////////////////////////////////////////////////////////////
// Returnează 1 dacă matricele sunt ~ egale
////////////////////////////////////////////////////////////////////////////////
int CompareMatrices(Matrix A, Matrix B)
{
int i;
if(A.width != B.width || A.height != B.height || A.pitch != B.pitch)
return 0;
int size = A.width * A.height;
for(i = 0; i < size; i++)
if(fabs(A.elements[i] - B.elements[i]) > MAX_ERR)
return 0;
return 1;
}
void GenerateRandomMatrix(Matrix m)
{
int i;
int size = m.width * m.height;
srand(time(NULL));
for(i = 0; i < size; i++)
m.elements[i] = rand() / (float)RAND_MAX;
}
////////////////////////////////////////////////////////////////////////////////
// main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int width = 0, height = 0;
FILE *f, *out, *grafic;
if(argc < 2)
{
printf("Argumente prea puține, trimiteți id-ul testului care trebuie rulat\n");
return 0;
}
char name[100];
sprintf(name, "./tests/test_%s.txt", argv[1]);
f = fopen(name, "r");
out = fopen("out.txt", "a");
grafic = fopen("grafic.txt", "a");
fscanf(f, "%d%d", &width, &height);
fprintf(grafic,"%d ",width*height);
Matrix M;//kernel de pe host
Matrix N;//matrice inițială de pe host
Matrix P;//rezultat fără memorie partajată calculat pe GPU
Matrix PS;//rezultatul cu memorie partajată calculat pe GPU
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE);
N = AllocateMatrix(width, height);
P = AllocateMatrix(width, height);
PS = AllocateMatrix(width, height);
GenerateRandomMatrix(M);
GenerateRandomMatrix(N);
// M * N pe device
ConvolutionOnDevice(M, N, P);
// M * N pe device cu memorie partajată
ConvolutionOnDeviceShared(M, N, PS);
// calculează rezultatul pe CPU pentru comparație
Matrix reference = AllocateMatrix(P.width, P.height);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
fprintf(grafic,"%s","\n");
// verifică dacă rezultatul obținut pe device este cel așteptat
int res = CompareMatrices(reference, P);
printf("Test global %s\n", (1 == res) ? "PASSED" : "FAILED");
fprintf(out, "Test global %s %s\n", argv[1], (1 == res) ? "PASSED" : "FAILED");
// verifică dacă rezultatul obținut pe device cu memorie partajată este cel așteptat
// int ress = CompareMatrices(reference, PS);
int ress = CompareMatrices(reference, PS);
printf("Test shared %s\n", (1 == ress) ? "PASSED" : "FAILED");
fprintf(out, "Test shared %s %s\n", argv[1], (1 == ress) ? "PASSED" : "FAILED");
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
FreeMatrix(&PS);
fclose(f);
fclose(out);
fclose(grafic);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
/*am adaugat variabila FILE * grafic
ce reprezinta fisierul de output pentru timp*/
Matrix Md, Nd, Pd; //matricele corespunzătoare de pe device
FILE *grafic;
float timp;
grafic = fopen("grafic.txt", "a");
//pentru măsurarea timpului de execuție în kernel
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
//alocare matricele de pe device
Md = AllocateDeviceMatrix(M.width,M.height);
Nd = AllocateDeviceMatrix(N.width,N.height);
Pd = AllocateDeviceMatrix(N.width,N.height);
//copiere date de pe host (M, N) pe device (MD, Nd)
int size = M.width * M.height * sizeof(float);
cudaMemcpy( Md.elements, M.elements, size, cudaMemcpyHostToDevice);
size = N.width * N.height * sizeof(float);
cudaMemcpy( Nd.elements, N.elements, size, cudaMemcpyHostToDevice);
//setare configurație de rulare a kernelului
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((N.width + BLOCK_SIZE - 1) /dimBlock.x,(N.height + BLOCK_SIZE - 1) /dimBlock.y);
sdkStartTimer(&kernelTime);
//lansare în execuție a kernelului
ConvolutionKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
cudaThreadSynchronize();
sdkStopTimer(&kernelTime);
timp = sdkGetTimerValue(&kernelTime);
printf ("Timp execuție kernel: %f ms\n", timp);
fprintf(grafic,"%f ",timp);
//copiere rezultat pe host
size = N.width * N.height * sizeof(float);
cudaMemcpy( P.elements, Pd.elements, size, cudaMemcpyDeviceToHost);
//eliberarea memoriei matricelor de pe device
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
fclose(grafic);
}
//nu am realizat varianta cu memory shared
void ConvolutionOnDeviceShared(const Matrix M, const Matrix N, Matrix P)
{
//Matrix Md, Nd, Pd; //matricele corespunzătoare de pe device
FILE *grafic;
float timp;
grafic = fopen("grafic.txt", "a");
//pentru măsurarea timpului de execuție în kernel
StopWatchInterface *kernelTime = NULL;
sdkCreateTimer(&kernelTime);
sdkResetTimer(&kernelTime);
//TODO: alocați matricele de pe device
//TODO: copiați datele de pe host (M, N) pe device (MD, Nd)
//TODO: setați configurația de rulare a kernelului
sdkStartTimer(&kernelTime);
//TODO: lansați în execuție kernelul
cudaThreadSynchronize();
sdkStopTimer(&kernelTime);
timp = sdkGetTimerValue(&kernelTime);
printf ("Timp execuție kernel cu memorie partajată: %f ms\n", timp);
fprintf(grafic,"%f ",timp);
//TODO: copiaţi rezultatul pe host
//TODO: eliberați memoria matricelor de pe device
fclose(grafic);
}
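////////////////////////////////////////////////////////////////////////////////
// Hedged sketch, not the original author's solution: one way the TODOs in
// ConvolutionOnDeviceShared above could be filled in, mirroring the flow of
// ConvolutionOnDevice. It launches the still-empty ConvolutionKernelShared
// stub, so it only illustrates the host-side plumbing.
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDeviceSharedSketch(const Matrix M, const Matrix N, Matrix P)
{
   // allocate the device matrices and copy the host data over
   Matrix Md = AllocateDeviceMatrix(M.width, M.height);
   Matrix Nd = AllocateDeviceMatrix(N.width, N.height);
   Matrix Pd = AllocateDeviceMatrix(N.width, N.height);
   cudaMemcpy(Md.elements, M.elements, M.width * M.height * sizeof(float), cudaMemcpyHostToDevice);
   cudaMemcpy(Nd.elements, N.elements, N.width * N.height * sizeof(float), cudaMemcpyHostToDevice);

   // same launch configuration as the global-memory version
   dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
   dim3 dimGrid((N.width + BLOCK_SIZE - 1) / dimBlock.x, (N.height + BLOCK_SIZE - 1) / dimBlock.y);
   ConvolutionKernelShared<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
   cudaDeviceSynchronize();

   // copy the result back and release the device matrices
   cudaMemcpy(P.elements, Pd.elements, N.width * N.height * sizeof(float), cudaMemcpyDeviceToHost);
   FreeDeviceMatrix(&Md);
   FreeDeviceMatrix(&Nd);
   FreeDeviceMatrix(&Pd);
}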
// Alocă o matrice de dimensiune height*width pe device
Matrix AllocateDeviceMatrix(int width, int height)
{
//TODO: alocați matricea și setați width, pitch și height
Matrix m;
m.width = width;
m.height = height;
m.pitch = width;
size_t size = width * height * sizeof(float);
cudaMalloc((void**)&m.elements,size);
return m;
}
// Alocă matrice pe host de dimensiune height*width
Matrix AllocateMatrix(int width, int height)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = (float*) malloc(size*sizeof(float));
return M;
}
// Eliberează o matrice de pe device
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Eliberează o matrice de pe host
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
|
e17a8f6c2ed04e7b2b417e5c01d7014b0a7574b9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel_hip.cuh"
#include <raft/distance/detail/pairwise_matrix/kernel_sm60.cuh> // pairwise_matrix_sm60_wrapper
#include <raft/linalg/contractions.cuh> // raft::linalg::Policy4x4
#include <raft/util/arch.cuh> // raft::util::arch::SM_compute_arch
namespace raft::bench::distance::tune {
// Distance op
using OpT = raft::distance::detail::ops::lp_unexp_distance_op<DataT, AccT, IdxT>;
constexpr float metric_arg = 2.0;
OpT distance_op{metric_arg};
// Kernel policy
constexpr int vec_len = 1;
using Policy = typename raft::linalg::Policy4x4<DataT, vec_len>::Policy;
// Architecture
namespace arch = raft::util::arch;
constexpr auto sm_compat_range = arch::SM_range(arch::SM_min(), arch::SM_future());
void launch_kernel(pairwise_matrix_params params, dim3 grid, hipStream_t stream)
{
dim3 block(Policy::Nthreads);
int smem_size = OpT::shared_mem_size<Policy>();
// Obtain function pointer to kernel
auto kernel = raft::distance::detail::pairwise_matrix_kernel<Policy,
row_major,
decltype(sm_compat_range),
OpT,
IdxT,
DataT,
OutT,
FinOpT>;
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), smem_size, stream, distance_op, params);
RAFT_CUDA_TRY(hipGetLastError());
}
void get_block_size(int& m, int& n, int& k)
{
m = Policy::Mblk;
n = Policy::Nblk;
k = Policy::Kblk;
}
void* get_kernel_ptr()
{
auto kernel = raft::distance::detail::pairwise_matrix_kernel<Policy,
row_major,
decltype(sm_compat_range),
OpT,
IdxT,
DataT,
OutT,
FinOpT>;
return reinterpret_cast<void*>(kernel);
}
int get_max_occupancy()
{
void* kernel_ptr = get_kernel_ptr();
int max_occupancy;
int smem_size = OpT::shared_mem_size<Policy>();
RAFT_CUDA_TRY(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_occupancy, kernel_ptr, Policy::Nthreads, smem_size));
return max_occupancy;
}
} // namespace raft::bench::distance::tune
|
e17a8f6c2ed04e7b2b417e5c01d7014b0a7574b9.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.cuh"
#include <raft/distance/detail/pairwise_matrix/kernel_sm60.cuh> // pairwise_matrix_sm60_wrapper
#include <raft/linalg/contractions.cuh> // raft::linalg::Policy4x4
#include <raft/util/arch.cuh> // raft::util::arch::SM_compute_arch
namespace raft::bench::distance::tune {
// Distance op
using OpT = raft::distance::detail::ops::lp_unexp_distance_op<DataT, AccT, IdxT>;
constexpr float metric_arg = 2.0;
OpT distance_op{metric_arg};
// Kernel policy
constexpr int vec_len = 1;
using Policy = typename raft::linalg::Policy4x4<DataT, vec_len>::Policy;
// Architecture
namespace arch = raft::util::arch;
constexpr auto sm_compat_range = arch::SM_range(arch::SM_min(), arch::SM_future());
void launch_kernel(pairwise_matrix_params params, dim3 grid, cudaStream_t stream)
{
dim3 block(Policy::Nthreads);
int smem_size = OpT::shared_mem_size<Policy>();
// Obtain function pointer to kernel
auto kernel = raft::distance::detail::pairwise_matrix_kernel<Policy,
row_major,
decltype(sm_compat_range),
OpT,
IdxT,
DataT,
OutT,
FinOpT>;
kernel<<<grid, block, smem_size, stream>>>(distance_op, params);
RAFT_CUDA_TRY(cudaGetLastError());
}
void get_block_size(int& m, int& n, int& k)
{
m = Policy::Mblk;
n = Policy::Nblk;
k = Policy::Kblk;
}
void* get_kernel_ptr()
{
auto kernel = raft::distance::detail::pairwise_matrix_kernel<Policy,
row_major,
decltype(sm_compat_range),
OpT,
IdxT,
DataT,
OutT,
FinOpT>;
return reinterpret_cast<void*>(kernel);
}
int get_max_occupancy()
{
void* kernel_ptr = get_kernel_ptr();
int max_occupancy;
int smem_size = OpT::shared_mem_size<Policy>();
RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_occupancy, kernel_ptr, Policy::Nthreads, smem_size));
return max_occupancy;
}
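// Hedged helper sketch, not used by the benchmark itself: get_max_occupancy()
// above reports resident blocks per SM, so a rough device-wide bound on
// concurrently resident blocks of this kernel can be estimated by multiplying
// with the SM count of device 0.
inline int get_estimated_resident_blocks()
{
  int sm_count = 0;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, 0));
  return sm_count * get_max_occupancy();
}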
} // namespace raft::bench::distance::tune
|
1a4187b14f7d8b796e8edf4ac5af64046ce34866.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
typedef enum TARGET {HOST, DEVICE} TARGET;
typedef struct {
int width;
int height;
float *elements;
} Matrix;
__global__ void sgemm(Matrix A, Matrix B, Matrix C,
const float alpha, const float beta,
const int width, const int height) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx = idx_y * width + idx_x;
if (idx_x >= width || idx_y >= height)
return;
float value = 0.f;
for (int e = 0; e < width; e++)
value += alpha * A.elements[idx_y * width + e] * B.elements[e * width + idx_x];
C.elements[idx] = value + beta * C.elements[idx];
}
void InitMatrix(Matrix &mat, const int width, const int height, TARGET target = HOST);
bool IsMatDiff(Matrix &A, Matrix &B);
void sgemm_host(Matrix &A, Matrix &B, Matrix &C,
const float alpha, const float beta,
const int width, const int height);
int main(int argv, char* argc[]) {
Matrix A, B, C_host, C_device;
Matrix dA, dB, dC;
const float alpha = 2.f;
const float beta = .5f;
const int width = 2048;
const int height = 2048;
float elapsed_gpu;
double elapsed_cpu;
// CUDA Event Create to estimate elapsed time
hipEvent_t start, stop;
struct timespec begin, finish;
hipEventCreate(&start);
hipEventCreate(&stop);
// Initialize host matrix
InitMatrix(A, width, height);
InitMatrix(B, width, height);
InitMatrix(C_host, width, height);
InitMatrix(C_device, width, height);
// CUDA Memory Initialize
InitMatrix(dA, width, height, DEVICE);
InitMatrix(dB, width, height, DEVICE);
InitMatrix(dC, width, height, DEVICE);
// CUDA Operation
hipEventRecord(start, 0);
clock_gettime(CLOCK_MONOTONIC, &begin);
// Copy host data to the device (CUDA global memory)
hipMemcpy(dA.elements, A.elements, width * height * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB.elements, B.elements, width * height * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dC.elements, C_device.elements, width * height * sizeof(float), hipMemcpyHostToDevice);
// Launch GPU Kernel
dim3 blockDim(8, 8);
dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y);
hipLaunchKernelGGL(( sgemm), dim3(gridDim), dim3(blockDim), 0, 0, dA, dB, dC, alpha, beta, width, height);
// Copy computation result from the Device to the host memory
hipMemcpy(C_device.elements, dC.elements, width * height * sizeof(float), hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
hipEventRecord(stop, 0);
// Estimate CUDA operation time
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_gpu, start, stop);
printf("SGEMM CUDA Elapsed time: %f ms\n", elapsed_gpu);
elapsed_cpu = (finish.tv_sec - begin.tv_sec);
elapsed_cpu += (finish.tv_nsec - begin.tv_nsec) / 1000000000.0;
printf("Host time: %f ms\n", elapsed_cpu * 1000);
// Compute CPU Operation
clock_gettime(CLOCK_MONOTONIC, &begin);
sgemm_host(A, B, C_host, alpha, beta, width, height);
clock_gettime(CLOCK_MONOTONIC, &finish);
elapsed_cpu = (finish.tv_sec - begin.tv_sec);
elapsed_cpu += (finish.tv_nsec - begin.tv_nsec) / 1000000000.0;
printf("SGEMM CPU only time: %f ms\n", elapsed_cpu * 1000);
if (IsMatDiff(C_host, C_device)) {
printf("Something wrong!!\n");
}
else {
printf("Success !!\n");
}
// finalize CUDA event
hipEventDestroy(start);
hipEventDestroy(stop);
// Finalize
hipFree(dA.elements);
hipFree(dB.elements);
hipFree(dC.elements);
free(A.elements);
free(B.elements);
free(C_host.elements);
free(C_device.elements);
return 0;
}
void InitMatrix(Matrix &mat, const int width, const int height, TARGET target) {
mat.width = width;
mat.height = height;
if (target == DEVICE) {
hipMalloc((void**)&mat.elements, width * height * sizeof(float));
}
else {
mat.elements = (float*)malloc(width * height * sizeof(float));
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
mat.elements[row * width + col] = row * width + col * 0.001;
}
}
}
}
bool IsMatDiff(Matrix &A, Matrix &B) {
if (A.width != B.width || A.height != B.height) {
return true;
}
int count = 0;
for (int row = 0; row < A.height; row++) {
for (int col = 0; col < A.width; col++) {
count += (A.elements[row * A.width + col] != B.elements[row * A.width + col]) ? 1 : 0;
}
}
if (count != 0) {
return true;
}
return false;
}
void sgemm_host(Matrix &A, Matrix &B, Matrix &C, const float alpha, const float beta, const int width, const int height) {
for (int row = 0; row < C.height; row++) {
for (int col = 0; col < C.width; col++) {
float value = 0.f;
for (int e = 0; e < C.width; e++)
value += alpha * A.elements[row * width + e] * B.elements[e * width + col];
C.elements[row * width + col] = value + beta * C.elements[row * width + col];
}
}
}
|
1a4187b14f7d8b796e8edf4ac5af64046ce34866.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
typedef enum TARGET {HOST, DEVICE} TARGET;
typedef struct {
int width;
int height;
float *elements;
} Matrix;
__global__ void sgemm(Matrix A, Matrix B, Matrix C,
const float alpha, const float beta,
const int width, const int height) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx = idx_y * width + idx_x;
if (idx_x >= width || idx_y >= height)
return;
float value = 0.f;
for (int e = 0; e < width; e++)
value += alpha * A.elements[idx_y * width + e] * B.elements[e * width + idx_x];
C.elements[idx] = value + beta * C.elements[idx];
}
void InitMatrix(Matrix &mat, const int width, const int height, TARGET target = HOST);
bool IsMatDiff(Matrix &A, Matrix &B);
void sgemm_host(Matrix &A, Matrix &B, Matrix &C,
const float alpha, const float beta,
const int width, const int height);
int main(int argv, char* argc[]) {
Matrix A, B, C_host, C_device;
Matrix dA, dB, dC;
const float alpha = 2.f;
const float beta = .5f;
const int width = 2048;
const int height = 2048;
float elapsed_gpu;
double elapsed_cpu;
// CUDA Event Create to estimate elapsed time
cudaEvent_t start, stop;
struct timespec begin, finish;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Initialize host matrix
InitMatrix(A, width, height);
InitMatrix(B, width, height);
InitMatrix(C_host, width, height);
InitMatrix(C_device, width, height);
// CUDA Memory Initialize
InitMatrix(dA, width, height, DEVICE);
InitMatrix(dB, width, height, DEVICE);
InitMatrix(dC, width, height, DEVICE);
// CUDA Operation
cudaEventRecord(start, 0);
clock_gettime(CLOCK_MONOTONIC, &begin);
// Copy host data to the device (CUDA global memory)
cudaMemcpy(dA.elements, A.elements, width * height * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB.elements, B.elements, width * height * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dC.elements, C_device.elements, width * height * sizeof(float), cudaMemcpyHostToDevice);
// Launch GPU Kernel
dim3 blockDim(8, 8);
dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y);
sgemm<<<gridDim, blockDim>>>(dA, dB, dC, alpha, beta, width, height);
// Copy computation result from the Device to the host memory
cudaMemcpy(C_device.elements, dC.elements, width * height * sizeof(float), cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
cudaEventRecord(stop, 0);
// Estimate CUDA operation time
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
printf("SGEMM CUDA Elapsed time: %f ms\n", elapsed_gpu);
elapsed_cpu = (finish.tv_sec - begin.tv_sec);
elapsed_cpu += (finish.tv_nsec - begin.tv_nsec) / 1000000000.0;
printf("Host time: %f ms\n", elapsed_cpu * 1000);
// Compute CPU Operation
clock_gettime(CLOCK_MONOTONIC, &begin);
sgemm_host(A, B, C_host, alpha, beta, width, height);
clock_gettime(CLOCK_MONOTONIC, &finish);
elapsed_cpu = (finish.tv_sec - begin.tv_sec);
elapsed_cpu += (finish.tv_nsec - begin.tv_nsec) / 1000000000.0;
printf("SGEMM CPU only time: %f ms\n", elapsed_cpu * 1000);
if (IsMatDiff(C_host, C_device)) {
printf("Something wrong!!\n");
}
else {
printf("Success !!\n");
}
// finalize CUDA event
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Finalize
cudaFree(dA.elements);
cudaFree(dB.elements);
cudaFree(dC.elements);
free(A.elements);
free(B.elements);
free(C_host.elements);
free(C_device.elements);
return 0;
}
void InitMatrix(Matrix &mat, const int width, const int height, TARGET target) {
mat.width = width;
mat.height = height;
if (target == DEVICE) {
cudaMalloc((void**)&mat.elements, width * height * sizeof(float));
}
else {
mat.elements = (float*)malloc(width * height * sizeof(float));
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
mat.elements[row * width + col] = row * width + col * 0.001;
}
}
}
}
bool IsMatDiff(Matrix &A, Matrix &B) {
if (A.width != B.width || A.height != B.height) {
return true;
}
int count = 0;
for (int row = 0; row < A.height; row++) {
for (int col = 0; col < A.width; col++) {
count += (A.elements[row * A.width + col] != B.elements[row * A.width + col]) ? 1 : 0;
}
}
if (count != 0) {
return true;
}
return false;
}
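// Hedged alternative sketch, not part of the original program: GPU and CPU
// floating-point results can differ in the last few bits even when both are
// correct, so an epsilon-based comparison is usually more robust than the
// exact != test in IsMatDiff above. The tolerance value is an assumption.
bool IsMatDiffEps(Matrix &A, Matrix &B, float eps) {
	if (A.width != B.width || A.height != B.height) {
		return true;
	}
	for (int i = 0; i < A.width * A.height; i++) {
		float diff = A.elements[i] - B.elements[i];
		if (diff > eps || diff < -eps) {
			return true;
		}
	}
	return false;
}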
void sgemm_host(Matrix &A, Matrix &B, Matrix &C, const float alpha, const float beta, const int width, const int height) {
for (int row = 0; row < C.height; row++) {
for (int col = 0; col < C.width; col++) {
float value = 0.f;
for (int e = 0; e < C.width; e++)
value += alpha * A.elements[row * width + e] * B.elements[e * width + col];
C.elements[row * width + col] = value + beta * C.elements[row * width + col];
}
}
}
|
7517f760d6b692004f72281a39a9c56da44375bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_gru.h"
#include "saber/core/tensor_op.h"
#include "hip/hip_fp16.h"
namespace anakin {
namespace saber {
////TODO: can try keeping the map vector in shared memory
template <typename Dtype>
__global__ void trans_map2in(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[tid] = input[map[seq] * lastdim + tid % lastdim];
}
}
template <typename Dtype>
__global__ void trans_map2out(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[map[seq]*lastdim + tid % lastdim] = input[tid];
}
}
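// Descriptive note on the two kernels above (added comment): trans_map2out
// scatters, writing element `tid` of the input to offset map[seq] in the
// batch-major layout, while trans_map2in gathers, reading from map[seq] to
// rebuild the sequence-major layout. `map` is built on the host in hw2seq()
// below and holds, for every word in the batch, its destination offset in
// units of `lastdim` (the word size or hidden size).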
template <>
void SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::seq2hw(\
std::vector<DataTensor_out*> outputs, std::vector<DataTensor_in*> inputs,
GruParam<OpTensor>& param, int hidden_size,
void* real_temp_out
) {
DataTensor_in* din = inputs[0];
DataTensor_out* dout = outputs[0];
int wordsize = din->channel();
std::vector<int> offset_vec = din->get_seq_offset();
CHECK_GE(offset_vec.size(), 2) << "offset must >=2" ;
int batch_size = offset_vec.size() - 1;
int max_len = 0;
std::vector<int> length_vec;
if ((void*)(outputs[0]->data()) == real_temp_out) {
DLOG(INFO) << "not use inner space";
return;
}
const OutDataType* origin = _temp_tensor_out.data();
OutDataType* target = dout->mutable_data();
//source is the sequence id in seq, target is the hw id in seq; map is the source-to-target ptr offset
int seq_sum = offset_vec[batch_size];
CUDA_CHECK(hipMemcpyAsync(_temp_map_dev.mutable_data(), _temp_map_host.data(), sizeof(int)*seq_sum,
hipMemcpyHostToDevice, _ctx.get_compute_stream()));
int count=seq_sum * hidden_size;
int block_dim=count;
int grid_dim=1;
if(count>1024){
block_dim=256;
grid_dim=(count+block_dim-1)/block_dim;
}
hipLaunchKernelGGL(( trans_map2in) , dim3(grid_dim), dim3(block_dim), 0, _ctx.get_compute_stream(), target, origin, _temp_map_dev.data(),
count, hidden_size);
// trans_map2in_old <<< 4, 128, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
// count, hidden_size);
}
//TODO:gem by self, flatten by time, padding by nothing (zhangs)
template <>
const float* SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::hw2seq(\
std::vector<DataTensor_in*> inputs, GruParam<OpTensor>& param, \
int word_size, int hidden_size, int& sequence_len) {
DataTensor_in* din = inputs[0];
std::vector<int> offset_vec = din->get_seq_offset();
CHECK_GE(offset_vec.size(), 2) << "offset must >=2" ;
int batch_size = offset_vec.size() - 1;
int seq_sum = offset_vec[offset_vec.size() - 1];
int wordsize = din->channel();
int max_len = 0;
std::vector<int> length_vec(batch_size);
for (int i = 0; i < offset_vec.size() - 1; ++i) {
int len = offset_vec[i + 1] - offset_vec[i];
max_len = max_len > len ? max_len : len;
length_vec[i] = len;
}
Shape seq_shape(1, max_len, batch_size, word_size);
_temp_tensor_in.try_expand_size(seq_shape);
Shape seq_out_shape(1, max_len, batch_size, hidden_size);
_temp_tensor_out.try_expand_size(seq_out_shape);
sequence_len = max_len;
if (batch_size == 1 || max_len == 1) {
return din->mutable_data();
}
InDataType* target = _temp_tensor_in.mutable_data();
const InDataType* origin = din->data();
_temp_map_host.try_expand_size(seq_sum);
_temp_map_dev.try_expand_size(seq_sum);
int* map = _temp_map_host.mutable_data();
if (param.is_reverse) {
for (int batchid = 0; batchid < batch_size; ++batchid) {
int batch_offset = max_len - length_vec[batchid];
for (int seqid = 0; seqid < length_vec[batchid]; ++seqid) {
int source = (offset_vec[batchid] + seqid);
int target = ((seqid + batch_offset) * batch_size + batchid);
map[source] = target;
}
}
} else {
for (int batchid = 0; batchid < batch_size; ++batchid) {
for (int seqid = 0; seqid < length_vec[batchid]; ++seqid) {
int source = (offset_vec[batchid] + seqid);
int target = (seqid * batch_size + batchid);
map[source] = target;
}
}
}
CUDA_CHECK(hipMemcpyAsync(_temp_map_dev.mutable_data(), _temp_map_host.data(), sizeof(int)*seq_sum,
hipMemcpyHostToDevice, _ctx.get_compute_stream()));
int count=seq_sum * wordsize;
int block_dim=count;
int grid_dim=1;
if(count>1024){
block_dim=256;
grid_dim=(count+block_dim-1)/block_dim;
}
hipLaunchKernelGGL(( trans_map2out) , dim3(grid_dim), dim3(block_dim), 0, _ctx.get_compute_stream(), target, origin, _temp_map_dev.data(),
count, wordsize);
// trans_map2out_old <<< 4, 128, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
// count, wordsize);
return _temp_tensor_in.data();
}
#define SIGMOID_THRESHOLD_MIN_PADDLE -40.0
#define SIGMOID_THRESHOLD_MAX_PADDLE 13.0
#define EXP_MAX_INPUT_PADDLE 40.0
template <typename T>
inline static __device__ T identity(const T a) {
return a;
}
template <typename T>
inline static __device__ T relu(const T a) {
return a > static_cast<T>(0.0) ? a : static_cast<T>(0.0);
}
template <typename T>
inline static __device__ T sigmoid_paddle(const T a) {
const T min = SIGMOID_THRESHOLD_MIN_PADDLE;
const T max = SIGMOID_THRESHOLD_MAX_PADDLE;
T tmp = (a < min) ? min : ((a > max) ? max : a);
return static_cast<T>(1.0) / (static_cast<T>(1.0) + exp(-tmp));
}
template <typename T>
inline static __device__ T tanh_paddle(const T a) {
T tmp = -2.0 * a;
tmp = (tmp > EXP_MAX_INPUT_PADDLE) ? EXP_MAX_INPUT_PADDLE : tmp;
return (2.0 / (1.0 + exp(tmp))) - 1.0;
}
static void anakin_NV_gemm(hipblasHandle_t handle, const bool TransA,
const bool TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (!TransA/* == CblasNoTrans*/) ? K : M;
int ldb = (!TransB/* == CblasNoTrans*/) ? N : K;
hipblasOperation_t cuTransA =
(!TransA/* == CblasNoTrans*/) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(!TransB/* == CblasNoTrans*/) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(handle, cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
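// Descriptive note (added comment): cuBLAS assumes column-major storage while
// the tensors here are row-major. Computing C = A * B in row-major order is
// done by asking cuBLAS for C^T = B^T * A^T, which is why the call above
// passes B before A, swaps M and N, and uses N as the leading dimension of C.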
/**
* gridDim=batchsize
* @tparam Dtype
* @param w_x_r
* @param w_h_r
* @param br
* @param hidden_size
* @param output_r
* @param w_x_z
* @param w_h_z
* @param bz
* @param output_z
*/
template <typename Dtype>
__global__ void cal_reset_update(Dtype* w_x_r, Dtype* w_h_r, const Dtype* b_r,
const int hidden_size, Dtype* output_r,
Dtype* w_x_z, Dtype* w_h_z, const Dtype* b_z, Dtype* output_z) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* out_output_r = output_r + h_base_index;
Dtype* out_output_z = output_z + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
out_output_r[index] = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_r)));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
out_output_z[index] = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_z)));
}
}
template <typename Dtype>
__global__ void cal_final(Dtype* w_x_o, Dtype* w_h_o, Dtype* reset, const Dtype* b_o,
const int hidden_size, Dtype* update, Dtype* output, Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* in_update = update + h_base_index;
Dtype* in_reset = reset + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * in_reset[index]
+ b_o[index];
Dtype acted = tanhf(before_act_h);
Dtype update_t = in_update[index];
out_output[index] = (1 - update_t) * acted + update_t* in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_tanh_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
const Dtype min = SIGMOID_THRESHOLD_MIN_PADDLE;
const Dtype max = SIGMOID_THRESHOLD_MAX_PADDLE;
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
before_act_r = (before_act_r < min) ? min : ((before_act_r > max) ? max : before_act_r);
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
before_act_z = (before_act_z < min) ? min : ((before_act_z > max) ? max : before_act_z);
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
before_act_h = (before_act_h > EXP_MAX_INPUT_PADDLE) ? EXP_MAX_INPUT_PADDLE : before_act_h;
Dtype acted = tanhf(before_act_h);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_modi_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3 + threadIdx.x;
int h_base_index = blockIdx.x * hidden_size + threadIdx.x;
for (int index = threadIdx.x; index < hidden_size;
index += blockDim.x, w_base_index += blockDim.x, h_base_index += blockDim.x) {
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[w_base_index] + w_h_o[w_base_index] * act_r
+ b_o[index];
Dtype acted = tanh(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[h_base_index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_relu_paddle_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, const Dtype* w_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int index = threadIdx.x;
if (index > hidden_size) {
return;
}
int w_base_index = blockIdx.x * hidden_size * 3 + index;
int u_base_index = blockIdx.x * hidden_size * 2 + index;
int h_base_index = blockIdx.x * hidden_size + index;
extern __shared__ Dtype shared_hidden_pre[];
Dtype hidden_pre_value = hidden_pre[h_base_index];
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index];
Dtype act_r = sigmoid_paddle(before_act_r);
shared_hidden_pre[index] = hidden_pre_value * act_r;
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index];
Dtype act_z = sigmoid_paddle(before_act_z);
Dtype w_h_o = static_cast<Dtype>(0.0);
int k_index = index;
__syncthreads();
for (int w_index = 0; w_index < hidden_size; ++w_index) {
w_h_o += shared_hidden_pre[w_index] * w_o[k_index];
k_index += hidden_size;
}
Dtype before_act_h = w_x_o[w_base_index] + w_h_o
+ b_o[index];
Dtype acted = relu(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted;
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_paddle_formula(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, const Dtype* w_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int index = threadIdx.x;
if (index > hidden_size) {
return;
}
int w_base_index = blockIdx.x * hidden_size * 3 + index;
int u_base_index = blockIdx.x * hidden_size * 2 + index;
int h_base_index = blockIdx.x * hidden_size + index;
extern __shared__ Dtype shared_hidden_pre[];
Dtype hidden_pre_value = hidden_pre[h_base_index];
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
// printf("%d %f=[%f , %f ,%f]\n",index,act_r,w_x_r[w_base_index],w_h_r[u_base_index],b_r[index]);
shared_hidden_pre[index] = hidden_pre_value * act_r;
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype w_h_o = static_cast<Dtype>(0.0);
int k_index = index;
__syncthreads();
for (int w_index = 0; w_index < hidden_size; ++w_index) {
w_h_o += shared_hidden_pre[w_index] * w_o[k_index];
k_index += hidden_size;
}
Dtype before_act_h = w_x_o[w_base_index] + w_h_o
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted;
// printf("output %d = %f\n",index,output[h_base_index]);
}
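// Descriptive note (added comment) on the kernel above, which evaluates the
// paddle-style GRU cell for one batch element per block, one hidden unit per
// thread, given the precomputed products W_x*x and W_h*h_prev:
//   r  = sigmoid(W_x_r x + W_h_r h_prev + b_r)
//   z  = sigmoid(W_x_z x + W_h_z h_prev + b_z)
//   h~ = tanh(W_x_o x + W_o (r .* h_prev) + b_o)
//   h  = (1 - z) .* h_prev + z .* h~
// The shared-memory buffer holds r .* h_prev so that, after the
// __syncthreads() barrier, every thread can compute its own element of the
// W_o (r .* h_prev) product.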
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
out_output[index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_index_modi(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre,
int seq_batch_hidden, int batch_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= seq_batch_hidden) {
return;
}
int batch_id = tid / hidden_size % batch_size;
int index = tid % hidden_size;
int w_base_index = batch_id * hidden_size * 3;
int h_base_index = batch_id * hidden_size;
int index_w = index + w_base_index;
int index_h = index + h_base_index;
{
Dtype before_act_r = w_x_r[index_w] + w_h_r[index_w] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[index_w] + w_h_z[index_w] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[index_w] + w_h_o[index_w] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[index_h] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[index_h];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_index(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre,
int seq_batch_hidden, int batch_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= seq_batch_hidden) {
return;
}
int batch_id = tid / hidden_size % batch_size;
int index = tid % hidden_size;
int w_base_index = batch_id * hidden_size * 3;
int h_base_index = batch_id * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
{
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
Dtype act_r = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_r)));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
Dtype act_z = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_z)));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_relu_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
const Dtype min = SIGMOID_THRESHOLD_MIN_PADDLE;
const Dtype max = SIGMOID_THRESHOLD_MAX_PADDLE;
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
before_act_r = (before_act_r < min) ? min : ((before_act_r > max) ? max : before_act_r);
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
before_act_z = (before_act_z < min) ? min : ((before_act_z > max) ? max : before_act_z);
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = before_act_h > static_cast<Dtype>(0.0) ? before_act_h : static_cast<Dtype>(0.0);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_modi(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3 + threadIdx.x;
int h_base_index = blockIdx.x * hidden_size + threadIdx.x;
for (int index = threadIdx.x; index < hidden_size;
index += blockDim.x, w_base_index += blockDim.x, h_base_index += blockDim.x) {
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[w_base_index] + w_h_o[w_base_index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[h_base_index];
}
}
template <>
SaberStatus SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::gru_cudnn(
const std::vector<DataTensor_in*> inputs,
std::vector<DataTensor_out*> outputs,
GruParam<OpTensor>& param) {
DataTensor_in* x = inputs[0];
const InDataType* x_data = x->data();
std::vector<int> offset=x->get_seq_offset();
const InDataType* h;
DataTensor_out* dout = outputs[0];
OutDataType* dout_data = dout->mutable_data();
//TODO:check shape first
const OpTensor* b = param.bias();
    int batch_size = offset.size() - 1; //x->get_seq_offset().size()-1;
int sequence = x->num();
int hidden_size = b->valid_size() / 3;
bool isHW2Seq=offset.size()>2;
int o_offset = 0;
int r_offset = 1;
int z_offset = 2;
// CHECK_EQ(w_h2h->height(), hidden_size) << "w_h2h->height()==batch_size";
// CHECK_EQ(w_h2h->width(), hidden_size * 3) << "w_h2h->width()==hidden_size*3";
//
// CHECK_EQ(w_i2h->height(), word_size) << "w_i2h->height()==word_size";
// CHECK_EQ(w_i2h->width(), hidden_size * 3) << "w_i2h->width()==hidden_size*3";
if (isHW2Seq) {
x_data = hw2seq(inputs, param, _word_size, hidden_size, sequence);
batch_size = offset.size() - 1;
if (x_data != x->data()) {
dout_data = _temp_tensor_out.mutable_data();
}
}
Shape shape_wx(sequence, batch_size, 3, hidden_size);
_temp_WX.try_expand_size(shape_wx);
Shape shape_wh(1, batch_size, 3, hidden_size);
_temp_WH.try_expand_size(shape_wh);
anakin_NV_gemm(_cublas_handle, false, false, sequence * batch_size, 3 * hidden_size,
_word_size, 1.0, x_data, _weights_i2h.data(), 0.0, _temp_WX.mutable_data());
const OpDataType* b_r = b->data() + r_offset * hidden_size;
const OpDataType* b_z = b->data() + z_offset * hidden_size;
const OpDataType* b_o = b->data() + o_offset * hidden_size;
if (inputs.size() == 1) {
CUDA_CHECK(hipMemsetAsync(dout_data, 0, sizeof(InDataType) * batch_size * hidden_size,
_ctx.get_compute_stream()));
h = dout_data;
} else {
h = inputs[1]->data();
CHECK_EQ(inputs[1]->valid_size(), batch_size * hidden_size) <<
"h size should be batch_size * hidden_size";
}
for (int seq = 0; seq < sequence; seq++) {
const InDataType* hidden_in;
InDataType* hidden_out = dout_data + seq * batch_size * hidden_size;
if (seq == 0) {
hidden_in = h;
} else {
hidden_in = dout_data + (seq - 1) * batch_size * hidden_size;
}
anakin_NV_gemm(_cublas_handle, false, false, batch_size,
3 * hidden_size, hidden_size, 1.0, hidden_in,
_weights_h2h.data(), 0.0, _temp_WH.mutable_data());
OpDataType* w_x_r = _temp_WX.mutable_data() + r_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_x_z = _temp_WX.mutable_data() + z_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_x_o = _temp_WX.mutable_data() + o_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_h_r = _temp_WH.mutable_data() + r_offset * hidden_size;
OpDataType* w_h_z = _temp_WH.mutable_data() + z_offset * hidden_size;
OpDataType* w_h_o = _temp_WH.mutable_data() + o_offset * hidden_size;
int frame_per_block = hidden_size <= 1024 ? hidden_size : 1024;
if (param.gate_activity == Active_sigmoid
&& param.h_activity == Active_tanh) {
cal_one_kernel_sigmoid_tanh_modi_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_tanh) {
cal_one_kernel_paddlesigmoid_tanh_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_relu) {
cal_one_kernel_paddlesigmoid_relu_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else {
LOG(ERROR) << "not support active function";
}
}
if (isHW2Seq) {
seq2hw(outputs, inputs, param, hidden_size, dout_data);
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
}
return SaberSuccess;
}
template<>
SaberStatus SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::dispatch(\
const std::vector<DataTensor_in*>& inputs,
std::vector<DataTensor_out*>& outputs,
GruParam <OpTensor>& param) {
if (param.formula == GRU_CUDNN) {
LOG(ERROR) << "saber cudnn formula not support reverse yet";
if (param.is_reverse) {
LOG(ERROR) << "saber cudnn formula not support reverse yet";
}
return gru_cudnn(inputs, outputs, param);
}
// LOG(INFO)<<"gru_paddle";
DataTensor_in* x = inputs[0];
std::vector<int> offset=x->get_seq_offset();
const InDataType* x_data = x->data();
const InDataType* h;
DataTensor_out* dout = outputs[0];
OutDataType* dout_data = dout->mutable_data();
//TODO:check shape first
const OpTensor* b = param.bias();
int batch_size = offset.size() - 1; //x->get_seq_offset().size()-1;
int sequence = x->num();
int hidden_size = b->valid_size() / 3;
bool isHW2Seq=offset.size()>2;
int o_offset = 0;
int r_offset = 1;
int z_offset = 2;
// CHECK_EQ(w_h2h->height(), hidden_size) << "w_h2h->height()==batch_size";
// CHECK_EQ(w_h2h->width(), hidden_size * 3) << "w_h2h->width()==hidden_size*3";
//
// CHECK_EQ(w_i2h->height(), word_size) << "w_i2h->height()==word_size";
// CHECK_EQ(w_i2h->width(), hidden_size * 3) << "w_i2h->width()==hidden_size*3";
if (isHW2Seq) {
x_data = hw2seq(inputs, param, _word_size, hidden_size, sequence);
// batch_size = inputs[0]->get_seq_offset().size() - 1;
if (x_data != x->data()) {
dout_data = _temp_tensor_out.mutable_data();
}
}
Shape shape_WX(sequence, batch_size, 3, hidden_size);
_temp_WX.try_expand_size(shape_WX);
Shape shape_WH(1, batch_size, 2, hidden_size);
_temp_WH.try_expand_size(shape_WH);
anakin_NV_gemm(_cublas_handle, false, false, sequence * batch_size, 3 * hidden_size,
_word_size, 1.0, x_data, _weights_i2h.data(), 0.0, _temp_WX.mutable_data());
const OpDataType* b_r = b->data() + r_offset * hidden_size;
const OpDataType* b_z = b->data() + z_offset * hidden_size;
const OpDataType* b_o = b->data() + o_offset * hidden_size;
if (inputs.size() == 1) {
CUDA_CHECK(hipMemsetAsync(dout_data, 0, sizeof(OutDataType)*batch_size * hidden_size,
_ctx.get_compute_stream()));
h = dout_data;
} else {
h = inputs[1]->data();
}
for (int seq = 0; seq < sequence; ++seq) {
int realseq = seq;
int last_seq = realseq - 1;
if (param.is_reverse) {
// DLOG(INFO)<<"reverse gru";
realseq = sequence - 1 - seq;
last_seq = realseq + 1;
}
const OutDataType* hidden_in;
OutDataType* hidden_out = dout_data + realseq * batch_size * hidden_size;
if (seq == 0) {
hidden_in = h;
} else {
hidden_in = dout_data + last_seq * batch_size * hidden_size;
}
anakin_NV_gemm(_cublas_handle, false, false, batch_size,
2 * hidden_size, hidden_size, 1.0, hidden_in,
_weights_h2h.data() + hidden_size * hidden_size, 0.0, _temp_WH.mutable_data());
OutDataType* w_x_r = _temp_WX.mutable_data() + r_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_x_z = _temp_WX.mutable_data() + z_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_x_o = _temp_WX.mutable_data() + o_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_h_r = _temp_WH.mutable_data() + 0 * hidden_size;
OutDataType* w_h_z = _temp_WH.mutable_data() + 1 * hidden_size;
const OpDataType * w_o = _weights_h2h.data();
        CHECK_LE(hidden_size, 1024) << "hidden size > 1024 is not supported yet for the paddle formula";
int frame_per_block = hidden_size <= 1024 ? hidden_size : 1024;
// DLOG(INFO) << "act = " << param._gate_activity << "," << param._h_activity;
if (param.gate_activity == Active_sigmoid
&& param.h_activity == Active_tanh) {
hipLaunchKernelGGL(( cal_one_kernel_sigmoid_tanh_paddle_formula)
, dim3(batch_size), dim3(frame_per_block), sizeof(OutDataType)*hidden_size
, _ctx.get_compute_stream(),
w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_relu) {
cal_one_kernel_paddlesigmoid_relu_paddle_formula
<< < batch_size, frame_per_block, sizeof(OutDataType)*hidden_size
, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else {
LOG(ERROR) << "not support active function";
}
}
if (isHW2Seq) {
seq2hw(outputs, inputs, param, hidden_size, dout_data);
}
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
return SaberSuccess;
}
}
}
|
7517f760d6b692004f72281a39a9c56da44375bf.cu
|
#include "saber/funcs/impl/cuda/saber_gru.h"
#include "saber/core/tensor_op.h"
#include "cuda_fp16.h"
namespace anakin {
namespace saber {
////TODO:can try record vector in shared
template <typename Dtype>
__global__ void trans_map2in(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[tid] = input[map[seq] * lastdim + tid % lastdim];
}
}
template <typename Dtype>
__global__ void trans_map2out(Dtype* output, const Dtype* input, const int* map, int count,
int lastdim) {
CUDA_KERNEL_LE(tid, count) {
int seq = tid / lastdim;
output[map[seq]*lastdim + tid % lastdim] = input[tid];
}
}
template <>
void SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::seq2hw(\
std::vector<DataTensor_out*> outputs, std::vector<DataTensor_in*> inputs,
GruParam<OpTensor>& param, int hidden_size,
void* real_temp_out
) {
DataTensor_in* din = inputs[0];
DataTensor_out* dout = outputs[0];
int wordsize = din->channel();
std::vector<int> offset_vec = din->get_seq_offset();
CHECK_GE(offset_vec.size(), 2) << "offset must >=2" ;
int batch_size = offset_vec.size() - 1;
int max_len = 0;
std::vector<int> length_vec;
if ((void*)(outputs[0]->data()) == real_temp_out) {
DLOG(INFO) << "not use inner space";
return;
}
const OutDataType* origin = _temp_tensor_out.data();
OutDataType* target = dout->mutable_data();
    // map[i] holds, for element i of the per-sequence (concatenated) layout, its index in the
    // batched time-major (hw) layout; it is the source-to-target offset consumed by the
    // trans_map2in / trans_map2out kernels.
int seq_sum = offset_vec[batch_size];
CUDA_CHECK(cudaMemcpyAsync(_temp_map_dev.mutable_data(), _temp_map_host.data(), sizeof(int)*seq_sum,
cudaMemcpyHostToDevice, _ctx.get_compute_stream()));
int count=seq_sum * hidden_size;
int block_dim=count;
int grid_dim=1;
if(count>1024){
block_dim=256;
grid_dim=(count+block_dim-1)/block_dim;
}
trans_map2in <<< grid_dim, block_dim, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
count, hidden_size);
// trans_map2in_old <<< 4, 128, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
// count, hidden_size);
}
//TODO:gem by self, flatten by time, padding by nothing (zhangs)
template <>
const float* SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::hw2seq(\
std::vector<DataTensor_in*> inputs, GruParam<OpTensor>& param, \
int word_size, int hidden_size, int& sequence_len) {
DataTensor_in* din = inputs[0];
std::vector<int> offset_vec = din->get_seq_offset();
CHECK_GE(offset_vec.size(), 2) << "offset must >=2" ;
int batch_size = offset_vec.size() - 1;
int seq_sum = offset_vec[offset_vec.size() - 1];
int wordsize = din->channel();
int max_len = 0;
std::vector<int> length_vec(batch_size);
for (int i = 0; i < offset_vec.size() - 1; ++i) {
int len = offset_vec[i + 1] - offset_vec[i];
max_len = max_len > len ? max_len : len;
length_vec[i] = len;
}
Shape seq_shape(1, max_len, batch_size, word_size);
_temp_tensor_in.try_expand_size(seq_shape);
Shape seq_out_shape(1, max_len, batch_size, hidden_size);
_temp_tensor_out.try_expand_size(seq_out_shape);
sequence_len = max_len;
if (batch_size == 1 || max_len == 1) {
return din->mutable_data();
}
InDataType* target = _temp_tensor_in.mutable_data();
const InDataType* origin = din->data();
_temp_map_host.try_expand_size(seq_sum);
_temp_map_dev.try_expand_size(seq_sum);
int* map = _temp_map_host.mutable_data();
if (param.is_reverse) {
for (int batchid = 0; batchid < batch_size; ++batchid) {
int batch_offset = max_len - length_vec[batchid];
for (int seqid = 0; seqid < length_vec[batchid]; ++seqid) {
int source = (offset_vec[batchid] + seqid);
int target = ((seqid + batch_offset) * batch_size + batchid);
map[source] = target;
}
}
} else {
for (int batchid = 0; batchid < batch_size; ++batchid) {
for (int seqid = 0; seqid < length_vec[batchid]; ++seqid) {
int source = (offset_vec[batchid] + seqid);
int target = (seqid * batch_size + batchid);
map[source] = target;
}
}
}
CUDA_CHECK(cudaMemcpyAsync(_temp_map_dev.mutable_data(), _temp_map_host.data(), sizeof(int)*seq_sum,
cudaMemcpyHostToDevice, _ctx.get_compute_stream()));
int count=seq_sum * wordsize;
int block_dim=count;
int grid_dim=1;
if(count>1024){
block_dim=256;
grid_dim=(count+block_dim-1)/block_dim;
}
trans_map2out <<< grid_dim, block_dim, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
count, wordsize);
// trans_map2out_old <<< 4, 128, 0, _ctx.get_compute_stream()>>>(target, origin, _temp_map_dev.data(),
// count, wordsize);
return _temp_tensor_in.data();
}
#define SIGMOID_THRESHOLD_MIN_PADDLE -40.0
#define SIGMOID_THRESHOLD_MAX_PADDLE 13.0
#define EXP_MAX_INPUT_PADDLE 40.0
template <typename T>
inline static __device__ T identity(const T a) {
return a;
}
template <typename T>
inline static __device__ T relu(const T a) {
return a > static_cast<T>(0.0) ? a : static_cast<T>(0.0);
}
template <typename T>
inline static __device__ T sigmoid_paddle(const T a) {
const T min = SIGMOID_THRESHOLD_MIN_PADDLE;
const T max = SIGMOID_THRESHOLD_MAX_PADDLE;
T tmp = (a < min) ? min : ((a > max) ? max : a);
return static_cast<T>(1.0) / (static_cast<T>(1.0) + exp(-tmp));
}
template <typename T>
inline static __device__ T tanh_paddle(const T a) {
T tmp = -2.0 * a;
tmp = (tmp > EXP_MAX_INPUT_PADDLE) ? EXP_MAX_INPUT_PADDLE : tmp;
return (2.0 / (1.0 + exp(tmp))) - 1.0;
}
static void anakin_NV_gemm(cublasHandle_t handle, const bool TransA,
const bool TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (!TransA/* == CblasNoTrans*/) ? K : M;
int ldb = (!TransB/* == CblasNoTrans*/) ? N : K;
cublasOperation_t cuTransA =
(!TransA/* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(!TransB/* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(handle, cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
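// The argument swap above implements row-major C = A * B on top of cuBLAS's
// column-major convention: requesting op(B) * op(A) with dimensions (N, M, K)
// yields C^T in column-major order, whose memory layout is exactly row-major C.
// Sanity sketch with hypothetical sizes: M = 2, K = 3, N = 4 and no transposes
// gives lda = 3, ldb = 4 and an output leading dimension of N = 4.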
/**
* gridDim=batchsize
* @tparam Dtype
* @param w_x_r
* @param w_h_r
* @param br
* @param hidden_size
* @param output_r
* @param w_x_z
* @param w_h_z
* @param bz
* @param output_z
*/
template <typename Dtype>
__global__ void cal_reset_update(Dtype* w_x_r, Dtype* w_h_r, const Dtype* b_r,
const int hidden_size, Dtype* output_r,
Dtype* w_x_z, Dtype* w_h_z, const Dtype* b_z, Dtype* output_z) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* out_output_r = output_r + h_base_index;
Dtype* out_output_z = output_z + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
out_output_r[index] = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_r)));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
out_output_z[index] = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_z)));
}
}
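// Minimal launch sketch for this split-gate path (hypothetical names; gridDim is
// the batch size, as noted in the doc comment above, and the strided loop lets
// any block size cover the row):
//   cal_reset_update<<<batch_size, min(hidden_size, 1024), 0, stream>>>(
//       w_x_r, w_h_r, b_r, hidden_size, output_r,
//       w_x_z, w_h_z, b_z, output_z);
// The resulting reset/update gates feed cal_final below together with the
// candidate-state terms w_x_o and w_h_o.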
template <typename Dtype>
__global__ void cal_final(Dtype* w_x_o, Dtype* w_h_o, Dtype* reset, const Dtype* b_o,
const int hidden_size, Dtype* update, Dtype* output, Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* in_update = update + h_base_index;
Dtype* in_reset = reset + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * in_reset[index]
+ b_o[index];
Dtype acted = tanhf(before_act_h);
Dtype update_t = in_update[index];
out_output[index] = (1 - update_t) * acted + update_t* in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_tanh_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
const Dtype min = SIGMOID_THRESHOLD_MIN_PADDLE;
const Dtype max = SIGMOID_THRESHOLD_MAX_PADDLE;
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
before_act_r = (before_act_r < min) ? min : ((before_act_r > max) ? max : before_act_r);
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
before_act_z = (before_act_z < min) ? min : ((before_act_z > max) ? max : before_act_z);
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
before_act_h = (before_act_h > EXP_MAX_INPUT_PADDLE) ? EXP_MAX_INPUT_PADDLE : before_act_h;
Dtype acted = tanhf(before_act_h);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_modi_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3 + threadIdx.x;
int h_base_index = blockIdx.x * hidden_size + threadIdx.x;
for (int index = threadIdx.x; index < hidden_size;
index += blockDim.x, w_base_index += blockDim.x, h_base_index += blockDim.x) {
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[w_base_index] + w_h_o[w_base_index] * act_r
+ b_o[index];
Dtype acted = tanh(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[h_base_index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_relu_paddle_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, const Dtype* w_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int index = threadIdx.x;
    if (index >= hidden_size) {
return;
}
int w_base_index = blockIdx.x * hidden_size * 3 + index;
int u_base_index = blockIdx.x * hidden_size * 2 + index;
int h_base_index = blockIdx.x * hidden_size + index;
extern __shared__ Dtype shared_hidden_pre[];
Dtype hidden_pre_value = hidden_pre[h_base_index];
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index];
Dtype act_r = sigmoid_paddle(before_act_r);
shared_hidden_pre[index] = hidden_pre_value * act_r;
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index];
Dtype act_z = sigmoid_paddle(before_act_z);
Dtype w_h_o = static_cast<Dtype>(0.0);
int k_index = index;
__syncthreads();
for (int w_index = 0; w_index < hidden_size; ++w_index) {
w_h_o += shared_hidden_pre[w_index] * w_o[k_index];
k_index += hidden_size;
}
Dtype before_act_h = w_x_o[w_base_index] + w_h_o
+ b_o[index];
Dtype acted = relu(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted;
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_paddle_formula(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, const Dtype* w_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int index = threadIdx.x;
    if (index >= hidden_size) {
return;
}
int w_base_index = blockIdx.x * hidden_size * 3 + index;
int u_base_index = blockIdx.x * hidden_size * 2 + index;
int h_base_index = blockIdx.x * hidden_size + index;
extern __shared__ Dtype shared_hidden_pre[];
Dtype hidden_pre_value = hidden_pre[h_base_index];
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
// printf("%d %f=[%f , %f ,%f]\n",index,act_r,w_x_r[w_base_index],w_h_r[u_base_index],b_r[index]);
shared_hidden_pre[index] = hidden_pre_value * act_r;
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype w_h_o = static_cast<Dtype>(0.0);
int k_index = index;
__syncthreads();
for (int w_index = 0; w_index < hidden_size; ++w_index) {
w_h_o += shared_hidden_pre[w_index] * w_o[k_index];
k_index += hidden_size;
}
Dtype before_act_h = w_x_o[w_base_index] + w_h_o
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted;
// printf("output %d = %f\n",index,output[h_base_index]);
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
out_output[index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_index_modi(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre,
int seq_batch_hidden, int batch_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= seq_batch_hidden) {
return;
}
int batch_id = tid / hidden_size % batch_size;
int index = tid % hidden_size;
int w_base_index = batch_id * hidden_size * 3;
int h_base_index = batch_id * hidden_size;
int index_w = index + w_base_index;
int index_h = index + h_base_index;
{
Dtype before_act_r = w_x_r[index_w] + w_h_r[index_w] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[index_w] + w_h_z[index_w] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[index_w] + w_h_o[index_w] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[index_h] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[index_h];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_index(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre,
int seq_batch_hidden, int batch_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= seq_batch_hidden) {
return;
}
int batch_id = tid / hidden_size % batch_size;
int index = tid % hidden_size;
int w_base_index = batch_id * hidden_size * 3;
int h_base_index = batch_id * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
{
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
Dtype act_r = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_r)));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
Dtype act_z = Dtype(Dtype(1) / (Dtype(1) + expf(-before_act_z)));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_paddlesigmoid_relu_cudnn_formula(Dtype* w_x_r, Dtype* w_x_z,
Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3;
int h_base_index = blockIdx.x * hidden_size;
Dtype* in_w_x_r = w_x_r + w_base_index;
Dtype* in_w_h_r = w_h_r + w_base_index;
Dtype* in_w_x_z = w_x_z + w_base_index;
Dtype* in_w_h_z = w_h_z + w_base_index;
Dtype* in_w_x_o = w_x_o + w_base_index;
Dtype* in_w_h_o = w_h_o + w_base_index;
const Dtype* in_hidden_pre = hidden_pre + h_base_index;
Dtype* out_output = output + h_base_index;
for (int index = threadIdx.x; index < hidden_size; index += blockDim.x) {
const Dtype min = SIGMOID_THRESHOLD_MIN_PADDLE;
const Dtype max = SIGMOID_THRESHOLD_MAX_PADDLE;
Dtype before_act_r = in_w_x_r[index] + in_w_h_r[index] + b_r[index];
before_act_r = (before_act_r < min) ? min : ((before_act_r > max) ? max : before_act_r);
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_r));
Dtype before_act_z = in_w_x_z[index] + in_w_h_z[index] + b_z[index];
before_act_z = (before_act_z < min) ? min : ((before_act_z > max) ? max : before_act_z);
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + exp(-before_act_z));
Dtype before_act_h = in_w_x_o[index] + in_w_h_o[index] * act_r
+ b_o[index];
Dtype acted = before_act_h > static_cast<Dtype>(0.0) ? before_act_h : static_cast<Dtype>(0.0);
out_output[index] = (1 - act_z) * acted + act_z * in_hidden_pre[index];
}
}
template <typename Dtype>
__global__ void cal_one_kernel_sigmoid_tanh_modi(Dtype* w_x_r, Dtype* w_x_z, Dtype* w_x_o,
Dtype* w_h_r, Dtype* w_h_z, Dtype* w_h_o,
const Dtype* b_r, const Dtype* b_z, const Dtype* b_o,
int hidden_size, Dtype* output, const Dtype* hidden_pre) {
int w_base_index = blockIdx.x * hidden_size * 3 + threadIdx.x;
int h_base_index = blockIdx.x * hidden_size + threadIdx.x;
for (int index = threadIdx.x; index < hidden_size;
index += blockDim.x, w_base_index += blockDim.x, h_base_index += blockDim.x) {
Dtype before_act_r = w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index];
Dtype act_r = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_r));
Dtype before_act_z = w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index];
Dtype act_z = static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-before_act_z));
Dtype before_act_h = w_x_o[w_base_index] + w_h_o[w_base_index] * act_r
+ b_o[index];
Dtype acted = tanhf(before_act_h);
output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * acted + act_z * hidden_pre[h_base_index];
}
}
template <>
SaberStatus SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::gru_cudnn(
const std::vector<DataTensor_in*> inputs,
std::vector<DataTensor_out*> outputs,
GruParam<OpTensor>& param) {
DataTensor_in* x = inputs[0];
const InDataType* x_data = x->data();
std::vector<int> offset=x->get_seq_offset();
const InDataType* h;
DataTensor_out* dout = outputs[0];
OutDataType* dout_data = dout->mutable_data();
//TODO:check shape first
const OpTensor* b = param.bias();
    int batch_size = offset.size() - 1; //x->get_seq_offset().size()-1;
int sequence = x->num();
int hidden_size = b->valid_size() / 3;
bool isHW2Seq=offset.size()>2;
int o_offset = 0;
int r_offset = 1;
int z_offset = 2;
// CHECK_EQ(w_h2h->height(), hidden_size) << "w_h2h->height()==batch_size";
// CHECK_EQ(w_h2h->width(), hidden_size * 3) << "w_h2h->width()==hidden_size*3";
//
// CHECK_EQ(w_i2h->height(), word_size) << "w_i2h->height()==word_size";
// CHECK_EQ(w_i2h->width(), hidden_size * 3) << "w_i2h->width()==hidden_size*3";
if (isHW2Seq) {
x_data = hw2seq(inputs, param, _word_size, hidden_size, sequence);
batch_size = offset.size() - 1;
if (x_data != x->data()) {
dout_data = _temp_tensor_out.mutable_data();
}
}
Shape shape_wx(sequence, batch_size, 3, hidden_size);
_temp_WX.try_expand_size(shape_wx);
Shape shape_wh(1, batch_size, 3, hidden_size);
_temp_WH.try_expand_size(shape_wh);
anakin_NV_gemm(_cublas_handle, false, false, sequence * batch_size, 3 * hidden_size,
_word_size, 1.0, x_data, _weights_i2h.data(), 0.0, _temp_WX.mutable_data());
const OpDataType* b_r = b->data() + r_offset * hidden_size;
const OpDataType* b_z = b->data() + z_offset * hidden_size;
const OpDataType* b_o = b->data() + o_offset * hidden_size;
if (inputs.size() == 1) {
CUDA_CHECK(cudaMemsetAsync(dout_data, 0, sizeof(InDataType) * batch_size * hidden_size,
_ctx.get_compute_stream()));
h = dout_data;
} else {
h = inputs[1]->data();
CHECK_EQ(inputs[1]->valid_size(), batch_size * hidden_size) <<
"h size should be batch_size * hidden_size";
}
for (int seq = 0; seq < sequence; seq++) {
const InDataType* hidden_in;
InDataType* hidden_out = dout_data + seq * batch_size * hidden_size;
if (seq == 0) {
hidden_in = h;
} else {
hidden_in = dout_data + (seq - 1) * batch_size * hidden_size;
}
anakin_NV_gemm(_cublas_handle, false, false, batch_size,
3 * hidden_size, hidden_size, 1.0, hidden_in,
_weights_h2h.data(), 0.0, _temp_WH.mutable_data());
OpDataType* w_x_r = _temp_WX.mutable_data() + r_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_x_z = _temp_WX.mutable_data() + z_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_x_o = _temp_WX.mutable_data() + o_offset * hidden_size
+ seq * batch_size * hidden_size * 3;
OpDataType* w_h_r = _temp_WH.mutable_data() + r_offset * hidden_size;
OpDataType* w_h_z = _temp_WH.mutable_data() + z_offset * hidden_size;
OpDataType* w_h_o = _temp_WH.mutable_data() + o_offset * hidden_size;
int frame_per_block = hidden_size <= 1024 ? hidden_size : 1024;
if (param.gate_activity == Active_sigmoid
&& param.h_activity == Active_tanh) {
cal_one_kernel_sigmoid_tanh_modi_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_tanh) {
cal_one_kernel_paddlesigmoid_tanh_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_relu) {
cal_one_kernel_paddlesigmoid_relu_cudnn_formula
<< < batch_size, frame_per_block, 0, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else {
LOG(ERROR) << "not support active function";
}
}
if (isHW2Seq) {
seq2hw(outputs, inputs, param, hidden_size, dout_data);
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
}
return SaberSuccess;
}
template<>
SaberStatus SaberGru<NV, AK_FLOAT, AK_FLOAT, AK_FLOAT, NCHW, NCHW, NCHW>::dispatch(\
const std::vector<DataTensor_in*>& inputs,
std::vector<DataTensor_out*>& outputs,
GruParam <OpTensor>& param) {
if (param.formula == GRU_CUDNN) {
LOG(ERROR) << "saber cudnn formula not support reverse yet";
if (param.is_reverse) {
LOG(ERROR) << "saber cudnn formula not support reverse yet";
}
return gru_cudnn(inputs, outputs, param);
}
// LOG(INFO)<<"gru_paddle";
DataTensor_in* x = inputs[0];
std::vector<int> offset=x->get_seq_offset();
const InDataType* x_data = x->data();
const InDataType* h;
DataTensor_out* dout = outputs[0];
OutDataType* dout_data = dout->mutable_data();
//TODO:check shape first
const OpTensor* b = param.bias();
int batch_size = offset.size() - 1; //x->get_seq_offset().size()-1;
int sequence = x->num();
int hidden_size = b->valid_size() / 3;
bool isHW2Seq=offset.size()>2;
int o_offset = 0;
int r_offset = 1;
int z_offset = 2;
// CHECK_EQ(w_h2h->height(), hidden_size) << "w_h2h->height()==batch_size";
// CHECK_EQ(w_h2h->width(), hidden_size * 3) << "w_h2h->width()==hidden_size*3";
//
// CHECK_EQ(w_i2h->height(), word_size) << "w_i2h->height()==word_size";
// CHECK_EQ(w_i2h->width(), hidden_size * 3) << "w_i2h->width()==hidden_size*3";
if (isHW2Seq) {
x_data = hw2seq(inputs, param, _word_size, hidden_size, sequence);
// batch_size = inputs[0]->get_seq_offset().size() - 1;
if (x_data != x->data()) {
dout_data = _temp_tensor_out.mutable_data();
}
}
Shape shape_WX(sequence, batch_size, 3, hidden_size);
_temp_WX.try_expand_size(shape_WX);
Shape shape_WH(1, batch_size, 2, hidden_size);
_temp_WH.try_expand_size(shape_WH);
anakin_NV_gemm(_cublas_handle, false, false, sequence * batch_size, 3 * hidden_size,
_word_size, 1.0, x_data, _weights_i2h.data(), 0.0, _temp_WX.mutable_data());
const OpDataType* b_r = b->data() + r_offset * hidden_size;
const OpDataType* b_z = b->data() + z_offset * hidden_size;
const OpDataType* b_o = b->data() + o_offset * hidden_size;
if (inputs.size() == 1) {
CUDA_CHECK(cudaMemsetAsync(dout_data, 0, sizeof(OutDataType)*batch_size * hidden_size,
_ctx.get_compute_stream()));
h = dout_data;
} else {
h = inputs[1]->data();
}
for (int seq = 0; seq < sequence; ++seq) {
int realseq = seq;
int last_seq = realseq - 1;
if (param.is_reverse) {
// DLOG(INFO)<<"reverse gru";
realseq = sequence - 1 - seq;
last_seq = realseq + 1;
}
const OutDataType* hidden_in;
OutDataType* hidden_out = dout_data + realseq * batch_size * hidden_size;
if (seq == 0) {
hidden_in = h;
} else {
hidden_in = dout_data + last_seq * batch_size * hidden_size;
}
anakin_NV_gemm(_cublas_handle, false, false, batch_size,
2 * hidden_size, hidden_size, 1.0, hidden_in,
_weights_h2h.data() + hidden_size * hidden_size, 0.0, _temp_WH.mutable_data());
OutDataType* w_x_r = _temp_WX.mutable_data() + r_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_x_z = _temp_WX.mutable_data() + z_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_x_o = _temp_WX.mutable_data() + o_offset * hidden_size
+ realseq * batch_size * hidden_size * 3;
OutDataType* w_h_r = _temp_WH.mutable_data() + 0 * hidden_size;
OutDataType* w_h_z = _temp_WH.mutable_data() + 1 * hidden_size;
const OpDataType * w_o = _weights_h2h.data();
        CHECK_LE(hidden_size, 1024) << "hidden size > 1024 is not supported yet for the paddle formula";
int frame_per_block = hidden_size <= 1024 ? hidden_size : 1024;
// DLOG(INFO) << "act = " << param._gate_activity << "," << param._h_activity;
if (param.gate_activity == Active_sigmoid
&& param.h_activity == Active_tanh) {
cal_one_kernel_sigmoid_tanh_paddle_formula
<<< batch_size, frame_per_block, sizeof(OutDataType)*hidden_size
, _ctx.get_compute_stream()>>>(
w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else if (param.gate_activity == Active_sigmoid_fluid
&& param.h_activity == Active_relu) {
cal_one_kernel_paddlesigmoid_relu_paddle_formula
<< < batch_size, frame_per_block, sizeof(OutDataType)*hidden_size
, _ctx.get_compute_stream() >> >
(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_o
, b_r, b_z, b_o, hidden_size, hidden_out, hidden_in);
} else {
LOG(ERROR) << "not support active function";
}
}
if (isHW2Seq) {
seq2hw(outputs, inputs, param, hidden_size, dout_data);
}
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
return SaberSuccess;
}
}
}
|
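The fused paddle-formula GRU kernels above operate on precomputed GEMM results and apply the gate math per hidden unit. Below is a minimal host-side sketch of that recurrence for a single batch row, intended only to clarify what cal_one_kernel_sigmoid_tanh_paddle_formula evaluates; the helper name and all buffers are hypothetical, and w_x_* / w_h_* are assumed to already hold the W*x and U_{r,z}*h_prev products, mirroring the kernel arguments.

#include <cmath>
#include <vector>

// Host reference of one paddle-formula GRU step (sketch; not tuned for speed).
// w_o is the H x H candidate weight matrix, walked column-wise as in the kernel.
static void gru_step_paddle_sketch(const std::vector<float>& w_x_r,
                                   const std::vector<float>& w_x_z,
                                   const std::vector<float>& w_x_o,
                                   const std::vector<float>& w_h_r,
                                   const std::vector<float>& w_h_z,
                                   const std::vector<float>& w_o,
                                   const std::vector<float>& b_r,
                                   const std::vector<float>& b_z,
                                   const std::vector<float>& b_o,
                                   const std::vector<float>& h_prev,
                                   std::vector<float>& h_out, int H) {
    std::vector<float> reset_h(H);                    // r .* h_prev, the role of shared_hidden_pre
    for (int i = 0; i < H; ++i) {
        float r = 1.f / (1.f + std::exp(-(w_x_r[i] + w_h_r[i] + b_r[i])));
        reset_h[i] = r * h_prev[i];
    }
    for (int i = 0; i < H; ++i) {
        float z = 1.f / (1.f + std::exp(-(w_x_z[i] + w_h_z[i] + b_z[i])));
        float uo = 0.f;                               // (U_o * (r .* h_prev))_i
        for (int k = 0; k < H; ++k) {
            uo += reset_h[k] * w_o[i + k * H];
        }
        float cand = std::tanh(w_x_o[i] + uo + b_o[i]);
        h_out[i] = (1.f - z) * h_prev[i] + z * cand;  // paddle formula: z gates the candidate
    }
}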
34e24fc1d0af4357bf819a83b6909096cbefcc89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void padding(int *op,int *ip,int N,int C,int H,int W,int Py,int Px){
unsigned int input_id = (blockIdx.x*gridDim.y + blockIdx.y + blockIdx.z*gridDim.x*gridDim.y)*blockDim.x + threadIdx.x;
int i = input_id/(C*H*W);
input_id = input_id%(C*H*W);
int j = input_id/(H*W);
input_id = input_id%(H*W);
int k = input_id/W;
int l = input_id%W;
*(op + i*C*(H + 2*Py)*(W + 2*Px) + j*(H + 2*Py)*(W + 2*Px) + (k + Py)*(W + 2*Px) + (l + Px)) = *(ip + i*C*H*W + j*H*W + k*W + l);
}
|
34e24fc1d0af4357bf819a83b6909096cbefcc89.cu
|
#include "includes.h"
__global__ void padding(int *op,int *ip,int N,int C,int H,int W,int Py,int Px){
unsigned int input_id = (blockIdx.x*gridDim.y + blockIdx.y + blockIdx.z*gridDim.x*gridDim.y)*blockDim.x + threadIdx.x;
int i = input_id/(C*H*W);
input_id = input_id%(C*H*W);
int j = input_id/(H*W);
input_id = input_id%(H*W);
int k = input_id/W;
int l = input_id%W;
*(op + i*C*(H + 2*Py)*(W + 2*Px) + j*(H + 2*Py)*(W + 2*Px) + (k + Py)*(W + 2*Px) + (l + Px)) = *(ip + i*C*H*W + j*H*W + k*W + l);
}
|
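A hedged usage sketch for the padding kernel above. The kernel has no bounds check and only writes interior elements, so this sketch zeroes the padded output first and launches exactly N*C*H*W threads with a 1-D grid, under which the kernel's index expression reduces to the usual blockIdx.x * blockDim.x + threadIdx.x; the helper name and the 256-thread block (assumed to divide the element count) are illustrative.

#include <cuda_runtime.h>

// Sketch of a host-side launcher for padding() producing zero padding (hypothetical).
void launch_padding_sketch(int* d_out, int* d_in,
                           int N, int C, int H, int W, int Py, int Px) {
    size_t padded_elems = (size_t)N * C * (H + 2 * Py) * (W + 2 * Px);
    cudaMemset(d_out, 0, padded_elems * sizeof(int));  // border stays zero; kernel fills only the interior
    int total = N * C * H * W;
    int block = 256;                                    // assumed to divide total for this sketch
    dim3 grid(total / block, 1, 1);
    padding<<<grid, block>>>(d_out, d_in, N, C, H, W, Py, Px);
}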
ba35d91bfb033fac14a5199d1a9850172a3d467f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2009, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _2D_CUBIC_BSPLINE_PREFILTER_H_
#define _2D_CUBIC_BSPLINE_PREFILTER_H_
#include <stdio.h>
#include <cutil.h>
#include "cubicPrefilter_kernel.cu"
#define MAX_DIMENSION 512
#define MEM_INTERLEAVE 32
// ***************************************************************************
// * Global GPU procedures
// ***************************************************************************
__global__ void SamplesToCoefficients2DX_simple(
float* image, // in-place processing
uint width, // width of the volume
uint height) // height of the volume
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
float* line = image + y * width; //direct access
ConvertToInterpolationCoefficients(line, width);
}
__global__ void SamplesToCoefficients2DX(
float* image, // in-place processing
uint width, // width of the volume
uint height) // height of the volume
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
const uint startIdx = y * width;
float line[MAX_DIMENSION];
// access the memory in an interleaved manner, to gain some performance
for (uint offset=0; offset < MEM_INTERLEAVE; offset++)
for (uint x=offset, i=startIdx+offset; x < width; x+=MEM_INTERLEAVE, i+=MEM_INTERLEAVE)
line[x] = image[i];
ConvertToInterpolationCoefficients(line, width);
for (uint offset=0; offset < MEM_INTERLEAVE; offset++)
for (uint x=offset, i=startIdx+offset; x < width; x+=MEM_INTERLEAVE, i+=MEM_INTERLEAVE)
image[i] = line[x];
}
__global__ void SamplesToCoefficients2DY(
float* image, // in-place processing
uint width, // width of the volume
uint height) // height of the volume
{
// process lines in y-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
float line[MAX_DIMENSION];
// copy the line to fast local memory
for (uint y = 0, i = x; y < height; y++) {
line[y] = image[i];
i += width;
}
ConvertToInterpolationCoefficients(line, height);
// copy the line back to the volume
for (uint y = 0, i = x; y < height; y++) {
image[i] = line[y];
i += width;
}
}
#undef MAX_DIMENSION
#undef MEM_INTERLEAVE
// ***************************************************************************
// * Exported functions
// ***************************************************************************
//! Convert the pixel values into cubic b-spline coefficients
//! @param image pointer to the image bitmap in GPU (device) memory
//! @param width image width in number of pixels
//! @param height image height in number of pixels
extern "C"
void CubicBSplinePrefilter2D(float* image, uint width, uint height)
{
dim3 dimBlockX(min(PowTwoDivider(height), 64));
dim3 dimGridX(height / dimBlockX.x);
hipLaunchKernelGGL(( SamplesToCoefficients2DX), dim3(dimGridX), dim3(dimBlockX), 0, 0, image, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed");
dim3 dimBlockY(min(PowTwoDivider(width), 64));
dim3 dimGridY(width / dimBlockY.x);
hipLaunchKernelGGL(( SamplesToCoefficients2DY), dim3(dimGridY), dim3(dimBlockY), 0, 0, image, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed");
}
//! Convert the pixel values into cubic b-spline coefficients
//! @param image pointer to the image bitmap in GPU (device) memory
//! @param width image width in number of pixels
//! @param height image height in number of pixels
//! @note Prints stopwatch feedback
extern "C"
void CubicBSplinePrefilter2DTimer(float* image, uint width, uint height)
{
printf("\nCubic B-Spline Prefilter timer:\n");
unsigned int hTimer;
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimBlockX(min(PowTwoDivider(height), 64));
dim3 dimGridX(height / dimBlockX.x);
hipLaunchKernelGGL(( SamplesToCoefficients2DX), dim3(dimGridX), dim3(dimBlockX), 0, 0, image, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueX = cutGetTimerValue(hTimer);
printf("x-direction : %f msec\n", timerValueX);
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimBlockY(min(PowTwoDivider(width), 64));
dim3 dimGridY(width / dimBlockY.x);
hipLaunchKernelGGL(( SamplesToCoefficients2DY), dim3(dimGridY), dim3(dimBlockY), 0, 0, image, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueY = cutGetTimerValue(hTimer);
printf("y-direction : %f msec\n", timerValueY);
printf("total : %f msec\n\n", timerValueX+timerValueY);
}
#endif //_2D_CUBIC_BSPLINE_PREFILTER_H_
|
ba35d91bfb033fac14a5199d1a9850172a3d467f.cu
|
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2009, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _2D_CUBIC_BSPLINE_PREFILTER_H_
#define _2D_CUBIC_BSPLINE_PREFILTER_H_
#include <stdio.h>
#include <cutil.h>
#include "cubicPrefilter_kernel.cu"
#define MAX_DIMENSION 512
#define MEM_INTERLEAVE 32
// ***************************************************************************
// * Global GPU procedures
// ***************************************************************************
__global__ void SamplesToCoefficients2DX_simple(
float* image, // in-place processing
uint width, // width of the volume
uint height) // height of the volume
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
float* line = image + y * width; //direct access
ConvertToInterpolationCoefficients(line, width);
}
__global__ void SamplesToCoefficients2DX(
float* image, // in-place processing
uint width, // width of the volume
uint height) // height of the volume
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
const uint startIdx = y * width;
float line[MAX_DIMENSION];
// access the memory in an interleaved manner, to gain some performance
for (uint offset=0; offset < MEM_INTERLEAVE; offset++)
for (uint x=offset, i=startIdx+offset; x < width; x+=MEM_INTERLEAVE, i+=MEM_INTERLEAVE)
line[x] = image[i];
ConvertToInterpolationCoefficients(line, width);
for (uint offset=0; offset < MEM_INTERLEAVE; offset++)
for (uint x=offset, i=startIdx+offset; x < width; x+=MEM_INTERLEAVE, i+=MEM_INTERLEAVE)
image[i] = line[x];
}
__global__ void SamplesToCoefficients2DY(
float* image, // in-place processing
uint width, // width of the volume
uint height) // height of the volume
{
// process lines in y-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
float line[MAX_DIMENSION];
// copy the line to fast local memory
for (uint y = 0, i = x; y < height; y++) {
line[y] = image[i];
i += width;
}
ConvertToInterpolationCoefficients(line, height);
// copy the line back to the volume
for (uint y = 0, i = x; y < height; y++) {
image[i] = line[y];
i += width;
}
}
#undef MAX_DIMENSION
#undef MEM_INTERLEAVE
// ***************************************************************************
// * Exported functions
// ***************************************************************************
//! Convert the pixel values into cubic b-spline coefficients
//! @param image pointer to the image bitmap in GPU (device) memory
//! @param width image width in number of pixels
//! @param height image height in number of pixels
extern "C"
void CubicBSplinePrefilter2D(float* image, uint width, uint height)
{
dim3 dimBlockX(min(PowTwoDivider(height), 64));
dim3 dimGridX(height / dimBlockX.x);
SamplesToCoefficients2DX<<<dimGridX, dimBlockX>>>(image, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed");
dim3 dimBlockY(min(PowTwoDivider(width), 64));
dim3 dimGridY(width / dimBlockY.x);
SamplesToCoefficients2DY<<<dimGridY, dimBlockY>>>(image, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed");
}
//! Convert the pixel values into cubic b-spline coefficients
//! @param image pointer to the image bitmap in GPU (device) memory
//! @param width image width in number of pixels
//! @param height image height in number of pixels
//! @note Prints stopwatch feedback
extern "C"
void CubicBSplinePrefilter2DTimer(float* image, uint width, uint height)
{
printf("\nCubic B-Spline Prefilter timer:\n");
unsigned int hTimer;
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimBlockX(min(PowTwoDivider(height), 64));
dim3 dimGridX(height / dimBlockX.x);
SamplesToCoefficients2DX<<<dimGridX, dimBlockX>>>(image, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DX kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueX = cutGetTimerValue(hTimer);
printf("x-direction : %f msec\n", timerValueX);
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimBlockY(min(PowTwoDivider(width), 64));
dim3 dimGridY(width / dimBlockY.x);
SamplesToCoefficients2DY<<<dimGridY, dimBlockY>>>(image, width, height);
CUT_CHECK_ERROR("SamplesToCoefficients2DY kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueY = cutGetTimerValue(hTimer);
printf("y-direction : %f msec\n", timerValueY);
printf("total : %f msec\n\n", timerValueX+timerValueY);
}
#endif //_2D_CUBIC_BSPLINE_PREFILTER_H_
|
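A possible caller for CubicBSplinePrefilter2D, sketched under the kernel's own constraints: width and height no larger than 512 (the size of the fixed local line buffers) and with a power-of-two factor so the block/grid split covers every row and column; the helper name and host buffers are hypothetical.

#include <cuda_runtime.h>

extern "C" void CubicBSplinePrefilter2D(float* image, unsigned int width, unsigned int height);

// Copies an image to the GPU, converts its samples to B-spline coefficients
// in place, and copies the coefficients back (illustrative sketch).
void prefilter_image_sketch(const float* h_pixels, float* h_coeffs,
                            unsigned int width, unsigned int height) {
    float* d_image = nullptr;
    size_t bytes = (size_t)width * height * sizeof(float);
    cudaMalloc(&d_image, bytes);
    cudaMemcpy(d_image, h_pixels, bytes, cudaMemcpyHostToDevice);
    CubicBSplinePrefilter2D(d_image, width, height);
    cudaMemcpy(h_coeffs, d_image, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_image);
}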
6b43396f5b8017bc07cccd1f8be9d5556044add8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define TB 128
#define GS(x) (((x) - 1) / TB + 1)
__global__ void downsample_(float *input, float *output, int factor, int size3, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int dim3 = id % size3;
int dim2 = id / size3;
atomicAdd(output + ((dim2 / factor) * (size3 / factor) + (dim3 / factor)), input[id] / (factor * factor));
}
}
|
6b43396f5b8017bc07cccd1f8be9d5556044add8.cu
|
#include "includes.h"
#define TB 128
#define GS(x) (((x) - 1) / TB + 1)
__global__ void downsample_(float *input, float *output, int factor, int size3, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int dim3 = id % size3;
int dim2 = id / size3;
atomicAdd(output + ((dim2 / factor) * (size3 / factor) + (dim3 / factor)), input[id] / (factor * factor));
}
}
|
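A hedged launch sketch for downsample_, reusing the TB/GS macros defined above. Because the kernel accumulates factor-by-factor box averages with atomicAdd, the output buffer must be zeroed before the launch; the helper name and the interpretation of size3 as the row width (with width and height assumed to be multiples of factor) are assumptions.

#include <cuda_runtime.h>

// Sketch: average-pool a width x height single-channel map by `factor`.
void launch_downsample_sketch(float* d_in, float* d_out,
                              int width, int height, int factor) {
    int size3 = width;                 // innermost extent, as used by the kernel
    int size = width * height;         // total input elements
    int out_elems = (width / factor) * (height / factor);
    cudaMemset(d_out, 0, out_elems * sizeof(float));
    downsample_<<<GS(size), TB>>>(d_in, d_out, factor, size3, size);
}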
dde9e7cc23efbdf45dd682ae92227228ec21cb71.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_test.h"
double A[MATRIX_ROWS * MATRIX_COLS];
double B[MATRIX_ROWS * MATRIX_COLS];
double transB[MATRIX_COLS * MATRIX_ROWS];
double C[MATRIX_ROWS * MATRIX_ROWS];
double C_cpu[MATRIX_ROWS * MATRIX_ROWS];
void generate_rand_matrix()
{
srand((int)time(0));
for(int i = 0; i<MATRIX_ROWS; i++)
{
for(int j = 0; j<MATRIX_COLS; j++)
{
A[i * MATRIX_COLS + j] = (float)rand()/RAND_MAX + (float)rand()/RAND_MAX/RAND_MAX;
B[i * MATRIX_COLS + j] = (float)rand()/RAND_MAX + (float)rand()/RAND_MAX/RAND_MAX;
}
}
// for(int i = 0; i<MATRIX_ROWS * MATRIX_COLS; i++)
// printf("%lf ", B[i]);
// printf("\n");
}
__global__ static void TransposeMatrix(double *A, double *B)
{
int tid = blockIdx.x * THREAD_NUM + threadIdx.x;
for(int i = tid; i<MATRIX_ROWS*MATRIX_COLS; i += BLOCK_NUM * THREAD_NUM)
{
int row = i / MATRIX_COLS;
int col = i % MATRIX_COLS;
if (row < MATRIX_ROWS && col < MATRIX_COLS)
B[row * MATRIX_COLS + col] = A[col * MATRIX_ROWS + row];
}
}
__global__ static void MatrixMultTransposeB(double *A, double * oriB, double *B, double *C)
{
int tid = blockIdx.x * THREAD_NUM + threadIdx.x;
int row, col;
double sum;
for(int i = tid; i<MATRIX_ROWS*MATRIX_ROWS; i += BLOCK_NUM * THREAD_NUM)
{
sum = 0;
row = i / MATRIX_ROWS;
col = i % MATRIX_ROWS;
for(int j = 0; j<MATRIX_COLS; j++)
{
// if(oriB[j*MATRIX_ROWS + col]!=B[col * MATRIX_COLS + j])
// printf(" %d %d\n", row, col);
sum += A[row*MATRIX_COLS + j] * oriB[j*MATRIX_ROWS + col];//B[col * MATRIX_COLS + j];
}
C[i] = sum;
}
}
__global__ static void MatrixMultOriginal(double *A, double *B, double *C)
{
int tid = blockIdx.x * THREAD_NUM + threadIdx.x;
int row, col;
double sum;
for(int i = tid; i<MATRIX_ROWS*MATRIX_ROWS; i += BLOCK_NUM * THREAD_NUM)
{
sum = 0;
row = i / MATRIX_ROWS;
col = i % MATRIX_ROWS;
for(int j = 0; j<MATRIX_COLS; j++)
{
sum += A[row*MATRIX_COLS + j] * B[j*MATRIX_ROWS + col];
}
C[i] = sum;
}
}
__global__ static void MatrixMultOnebyOne(double *A, double *B, double *C)
{
double sum;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if(row < MATRIX_ROWS && col < MATRIX_ROWS)
{
sum = 0;
for(int i = 0; i<MATRIX_COLS; i++)
{
sum += A[row*MATRIX_COLS + i] * B[i*MATRIX_ROWS + col];
}
C[row*MATRIX_ROWS + col] = sum;
}
}
//template <int blockSize>
__global__ void MatrixMultSharedMemory(double *A, double *B, double *C)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int x = threadIdx.x;
int y = threadIdx.y;
double tsum = 0;
for(int i = 0; i<MATRIX_COLS; i+=SHARED_BLOCK_SIZE)
{
__shared__ double As[SHARED_BLOCK_SIZE][SHARED_BLOCK_SIZE];
__shared__ double Bs[SHARED_BLOCK_SIZE][SHARED_BLOCK_SIZE];
if(bx * SHARED_BLOCK_SIZE + x < MATRIX_ROWS && i + y < MATRIX_COLS)
As[x][y] = A[bx*SHARED_BLOCK_SIZE*MATRIX_COLS + x*MATRIX_COLS + i + y];
else
As[x][y] = 0;
if(x + i < MATRIX_COLS && by * SHARED_BLOCK_SIZE + y < MATRIX_ROWS)
Bs[x][y] = B[x*MATRIX_ROWS + i*MATRIX_ROWS + by*SHARED_BLOCK_SIZE + y];
else
Bs[x][y] = 0;
__syncthreads();
for(int k = 0; k<SHARED_BLOCK_SIZE; k++)
tsum += As[x][k] * Bs[k][y];
__syncthreads();
}
if(bx * SHARED_BLOCK_SIZE + x < MATRIX_ROWS && by * SHARED_BLOCK_SIZE + y < MATRIX_ROWS)
C[bx*SHARED_BLOCK_SIZE*MATRIX_ROWS + x*MATRIX_ROWS + by*SHARED_BLOCK_SIZE + y] = tsum;
}
void test_matrix_mult_cpu()
{
clock_t start, finish;
memset(C_cpu, 0, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
start = clock();
for(int i = 0; i<MATRIX_ROWS; i++)
{
for(int j = 0; j<MATRIX_ROWS; j++)
{
for(int k = 0; k<MATRIX_COLS; k++)
{
C_cpu[i*MATRIX_ROWS + j] += A[i*MATRIX_COLS + k] * B[k*MATRIX_ROWS + j];
}
}
}
finish = clock();
printf("Mode 0: CPU Calculation time: %lf\n",finish, start, CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult_gpu_original()
{
clock_t start, finish, cal_start, cal_finish;
double *cuda_A, *cuda_B, *cuda_C;
start = clock();
hipMalloc((void**)&cuda_A, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_B, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_C, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
hipMemcpy(cuda_A, A, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyHostToDevice);
hipMemcpy(cuda_B, B, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyHostToDevice);
cal_start = clock();
hipLaunchKernelGGL(( MatrixMultOriginal), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, cuda_A, cuda_B, cuda_C);
hipMemcpy(C, cuda_C, sizeof(double)*MATRIX_ROWS*MATRIX_ROWS, hipMemcpyDeviceToHost);
cal_finish = clock();
hipFree(cuda_A);
hipFree(cuda_B);
hipFree(cuda_C);
finish = clock();
double total_err = 0;
for(int i = 0; i<MATRIX_ROWS*MATRIX_ROWS; i++)
{
total_err += fabs(C[i] - C_cpu[i]);
}
printf("Mode 1: Total error: %lf, GPU Calculation time: %lf, Total time: %lf \n", total_err, 1.0 * (cal_finish - cal_start) / CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult_gpu_one_by_one()
{
clock_t start, finish, cal_start, cal_finish;
double *cuda_A, *cuda_B, *cuda_C;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid_size((MATRIX_ROWS+BLOCK_SIZE-1)/BLOCK_SIZE, (MATRIX_ROWS + BLOCK_SIZE -1)/BLOCK_SIZE);
start = clock();
hipMalloc((void**)&cuda_A, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_B, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_C, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
hipMemcpy(cuda_A, A, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyHostToDevice);
hipMemcpy(cuda_B, B, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyHostToDevice);
cal_start = clock();
hipLaunchKernelGGL(( MatrixMultOnebyOne), dim3(grid_size), dim3(block_size), 0, 0, cuda_A, cuda_B, cuda_C);
hipMemcpy(C, cuda_C, sizeof(double)*MATRIX_ROWS*MATRIX_ROWS, hipMemcpyDeviceToHost);
cal_finish = clock();
hipFree(cuda_A);
hipFree(cuda_B);
hipFree(cuda_C);
finish = clock();
double total_err = 0;
for(int i = 0; i<MATRIX_ROWS*MATRIX_ROWS; i++)
{
total_err += fabs(C[i] - C_cpu[i]);
}
printf("Mode 3: Total error: %lf, GPU Calculation time: %lf, Total time: %lf \n", total_err, 1.0 * (cal_finish - cal_start) / CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult_gpu_shared_memory()
{
clock_t start, finish, cal_start, cal_finish;
double *cuda_A, *cuda_B, *cuda_C;
dim3 block_size(SHARED_BLOCK_SIZE, SHARED_BLOCK_SIZE, 1);
dim3 grid_size((MATRIX_ROWS+SHARED_BLOCK_SIZE-1)/SHARED_BLOCK_SIZE, (MATRIX_ROWS + SHARED_BLOCK_SIZE -1)/SHARED_BLOCK_SIZE);
start = clock();
hipMalloc((void**)&cuda_A, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_B, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_C, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
hipMemcpy(cuda_A, A, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyHostToDevice);
hipMemcpy(cuda_B, B, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyHostToDevice);
cal_start = clock();
hipLaunchKernelGGL(( MatrixMultSharedMemory), dim3(grid_size), dim3(block_size), 0, 0, cuda_A, cuda_B, cuda_C);
hipMemcpy(C, cuda_C, sizeof(double)*MATRIX_ROWS*MATRIX_ROWS, hipMemcpyDeviceToHost);
cal_finish = clock();
hipFree(cuda_A);
hipFree(cuda_B);
hipFree(cuda_C);
finish = clock();
// for(int idx = 0; idx < MATRIX_ROWS; idx ++)
// {
// for(int jdx = 0; jdx <MATRIX_ROWS; jdx++)
// printf("%lf %lf ", C[idx * MATRIX_ROWS + jdx], C_cpu[idx * MATRIX_ROWS + jdx]);
// printf("\n");
// }
double total_err = 0;
for(int i = 0; i<MATRIX_ROWS*MATRIX_ROWS; i++)
{
total_err += fabs(C[i] - C_cpu[i]);
}
printf("Mode 4: Total error: %lf, GPU Calculation time: %lf, Total time: %lf \n", total_err, 1.0 * (cal_finish - cal_start) / CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult_gpu_transpose_B()
{
clock_t start, finish, cal_start, cal_finish;
double *cuda_A, *cuda_B, *cuda_C, *cuda_transB;
start = clock();
hipMalloc((void**)&cuda_A, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_B, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_transB, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
hipMalloc((void**)&cuda_C, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
hipMemcpy(cuda_A, A, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyHostToDevice);
hipMemcpy(cuda_B, B, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( TransposeMatrix), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, cuda_B, cuda_transB);
hipMemcpy(transB, cuda_transB, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, hipMemcpyDeviceToHost);
// for(int i = 0; i<MATRIX_COLS; i++)
// {
// for(int j = 0; j<MATRIX_ROWS; j++)
// printf("%lf ", B[i * MATRIX_ROWS + j]);
// printf("\n");
// }
//
// for(int i = 0; i<MATRIX_ROWS; i++)
// {
// for(int j = 0; j<MATRIX_COLS; j++)
// printf("%lf ", transB[i * MATRIX_COLS + j]);
// printf("\n");
// }
cal_start = clock();
hipLaunchKernelGGL(( MatrixMultTransposeB), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, cuda_A, cuda_B, cuda_transB, cuda_C);
hipMemcpy(C, cuda_C, sizeof(double)*MATRIX_ROWS*MATRIX_ROWS, hipMemcpyDeviceToHost);
cal_finish = clock();
hipFree(cuda_A);
hipFree(cuda_B);
hipFree(cuda_C);
finish = clock();
double total_err = 0;
for(int i = 0; i<MATRIX_ROWS*MATRIX_ROWS; i++)
{
total_err += fabs(C[i] - C_cpu[i]);
}
printf("Mode 2: Total error: %lf, GPU Calculation time: %lf, Total time: %lf \n", total_err, 1.0 * (cal_finish - cal_start) / CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult()
{
generate_rand_matrix();
// for(int idx = 0; idx < MATRIX_ROWS; idx ++)
// {
// for(int jdx = 0; jdx <MATRIX_COLS; jdx++)
// printf("%lf ", A[idx * MATRIX_COLS + jdx]);
// printf("\n");
// }
// printf(" \n");
// for(int idx = 0; idx < MATRIX_COLS; idx ++)
// {
// for(int jdx = 0; jdx < MATRIX_ROWS; jdx++)
// printf("%lf ", B[idx * MATRIX_ROWS + jdx]);
// printf("\n");
// }
// printf(" \n");
printf("Test Matrix Multiplication(A(%dx%d) * B(%dx%d)): \n", MATRIX_ROWS, MATRIX_COLS, MATRIX_COLS, MATRIX_ROWS);
test_matrix_mult_cpu();
test_matrix_mult_gpu_original();
test_matrix_mult_gpu_transpose_B();
test_matrix_mult_gpu_one_by_one();
test_matrix_mult_gpu_shared_memory();
}
|
dde9e7cc23efbdf45dd682ae92227228ec21cb71.cu
|
#include "cuda_test.h"
double A[MATRIX_ROWS * MATRIX_COLS];
double B[MATRIX_ROWS * MATRIX_COLS];
double transB[MATRIX_COLS * MATRIX_ROWS];
double C[MATRIX_ROWS * MATRIX_ROWS];
double C_cpu[MATRIX_ROWS * MATRIX_ROWS];
void generate_rand_matrix()
{
srand((int)time(0));
for(int i = 0; i<MATRIX_ROWS; i++)
{
for(int j = 0; j<MATRIX_COLS; j++)
{
A[i * MATRIX_COLS + j] = (float)rand()/RAND_MAX + (float)rand()/RAND_MAX/RAND_MAX;
B[i * MATRIX_COLS + j] = (float)rand()/RAND_MAX + (float)rand()/RAND_MAX/RAND_MAX;
}
}
// for(int i = 0; i<MATRIX_ROWS * MATRIX_COLS; i++)
// printf("%lf ", B[i]);
// printf("\n");
}
__global__ static void TransposeMatrix(double *A, double *B)
{
int tid = blockIdx.x * THREAD_NUM + threadIdx.x;
for(int i = tid; i<MATRIX_ROWS*MATRIX_COLS; i += BLOCK_NUM * THREAD_NUM)
{
int row = i / MATRIX_COLS;
int col = i % MATRIX_COLS;
if (row < MATRIX_ROWS && col < MATRIX_COLS)
B[row * MATRIX_COLS + col] = A[col * MATRIX_ROWS + row];
}
}
__global__ static void MatrixMultTransposeB(double *A, double * oriB, double *B, double *C)
{
int tid = blockIdx.x * THREAD_NUM + threadIdx.x;
int row, col;
double sum;
for(int i = tid; i<MATRIX_ROWS*MATRIX_ROWS; i += BLOCK_NUM * THREAD_NUM)
{
sum = 0;
row = i / MATRIX_ROWS;
col = i % MATRIX_ROWS;
for(int j = 0; j<MATRIX_COLS; j++)
{
// if(oriB[j*MATRIX_ROWS + col]!=B[col * MATRIX_COLS + j])
// printf(" %d %d\n", row, col);
sum += A[row*MATRIX_COLS + j] * oriB[j*MATRIX_ROWS + col];//B[col * MATRIX_COLS + j];
}
C[i] = sum;
}
}
__global__ static void MatrixMultOriginal(double *A, double *B, double *C)
{
int tid = blockIdx.x * THREAD_NUM + threadIdx.x;
int row, col;
double sum;
for(int i = tid; i<MATRIX_ROWS*MATRIX_ROWS; i += BLOCK_NUM * THREAD_NUM)
{
sum = 0;
row = i / MATRIX_ROWS;
col = i % MATRIX_ROWS;
for(int j = 0; j<MATRIX_COLS; j++)
{
sum += A[row*MATRIX_COLS + j] * B[j*MATRIX_ROWS + col];
}
C[i] = sum;
}
}
__global__ static void MatrixMultOnebyOne(double *A, double *B, double *C)
{
double sum;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if(row < MATRIX_ROWS && col < MATRIX_ROWS)
{
sum = 0;
for(int i = 0; i<MATRIX_COLS; i++)
{
sum += A[row*MATRIX_COLS + i] * B[i*MATRIX_ROWS + col];
}
C[row*MATRIX_ROWS + col] = sum;
}
}
//template <int blockSize>
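// Tiled multiply: each block stages a SHARED_BLOCK_SIZE x SHARED_BLOCK_SIZE
// tile of A and of B in shared memory, accumulates the partial dot product,
// then slides the tiles along the inner dimension (MATRIX_COLS). Out-of-range
// tile entries are zero-padded so the sizes need not be multiples of the tile.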
__global__ void MatrixMultSharedMemory(double *A, double *B, double *C)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int x = threadIdx.x;
int y = threadIdx.y;
double tsum = 0;
for(int i = 0; i<MATRIX_COLS; i+=SHARED_BLOCK_SIZE)
{
__shared__ double As[SHARED_BLOCK_SIZE][SHARED_BLOCK_SIZE];
__shared__ double Bs[SHARED_BLOCK_SIZE][SHARED_BLOCK_SIZE];
if(bx * SHARED_BLOCK_SIZE + x < MATRIX_ROWS && i + y < MATRIX_COLS)
As[x][y] = A[bx*SHARED_BLOCK_SIZE*MATRIX_COLS + x*MATRIX_COLS + i + y];
else
As[x][y] = 0;
if(x + i < MATRIX_COLS && by * SHARED_BLOCK_SIZE + y < MATRIX_ROWS)
Bs[x][y] = B[x*MATRIX_ROWS + i*MATRIX_ROWS + by*SHARED_BLOCK_SIZE + y];
else
Bs[x][y] = 0;
__syncthreads();
for(int k = 0; k<SHARED_BLOCK_SIZE; k++)
tsum += As[x][k] * Bs[k][y];
__syncthreads();
}
if(bx * SHARED_BLOCK_SIZE + x < MATRIX_ROWS && by * SHARED_BLOCK_SIZE + y < MATRIX_ROWS)
C[bx*SHARED_BLOCK_SIZE*MATRIX_ROWS + x*MATRIX_ROWS + by*SHARED_BLOCK_SIZE + y] = tsum;
}
void test_matrix_mult_cpu()
{
clock_t start, finish;
memset(C_cpu, 0, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
start = clock();
for(int i = 0; i<MATRIX_ROWS; i++)
{
for(int j = 0; j<MATRIX_ROWS; j++)
{
for(int k = 0; k<MATRIX_COLS; k++)
{
C_cpu[i*MATRIX_ROWS + j] += A[i*MATRIX_COLS + k] * B[k*MATRIX_ROWS + j];
}
}
}
finish = clock();
printf("Mode 0: CPU Calculation time: %lf\n",finish, start, CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult_gpu_original()
{
clock_t start, finish, cal_start, cal_finish;
double *cuda_A, *cuda_B, *cuda_C;
start = clock();
cudaMalloc((void**)&cuda_A, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_B, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_C, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
cudaMemcpy(cuda_A, A, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_B, B, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyHostToDevice);
cal_start = clock();
MatrixMultOriginal<<<BLOCK_NUM, THREAD_NUM, 0>>>(cuda_A, cuda_B, cuda_C);
cudaMemcpy(C, cuda_C, sizeof(double)*MATRIX_ROWS*MATRIX_ROWS, cudaMemcpyDeviceToHost);
cal_finish = clock();
cudaFree(cuda_A);
cudaFree(cuda_B);
cudaFree(cuda_C);
finish = clock();
double total_err = 0;
for(int i = 0; i<MATRIX_ROWS*MATRIX_ROWS; i++)
{
total_err += fabs(C[i] - C_cpu[i]);
}
printf("Mode 1: Total error: %lf, GPU Calculation time: %lf, Total time: %lf \n", total_err, 1.0 * (cal_finish - cal_start) / CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult_gpu_one_by_one()
{
clock_t start, finish, cal_start, cal_finish;
double *cuda_A, *cuda_B, *cuda_C;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid_size((MATRIX_ROWS+BLOCK_SIZE-1)/BLOCK_SIZE, (MATRIX_ROWS + BLOCK_SIZE -1)/BLOCK_SIZE);
start = clock();
cudaMalloc((void**)&cuda_A, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_B, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_C, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
cudaMemcpy(cuda_A, A, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_B, B, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyHostToDevice);
cal_start = clock();
MatrixMultOnebyOne<<<grid_size, block_size, 0>>>(cuda_A, cuda_B, cuda_C);
cudaMemcpy(C, cuda_C, sizeof(double)*MATRIX_ROWS*MATRIX_ROWS, cudaMemcpyDeviceToHost);
cal_finish = clock();
cudaFree(cuda_A);
cudaFree(cuda_B);
cudaFree(cuda_C);
finish = clock();
double total_err = 0;
for(int i = 0; i<MATRIX_ROWS*MATRIX_ROWS; i++)
{
total_err += fabs(C[i] - C_cpu[i]);
}
printf("Mode 3: Total error: %lf, GPU Calculation time: %lf, Total time: %lf \n", total_err, 1.0 * (cal_finish - cal_start) / CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult_gpu_shared_memory()
{
clock_t start, finish, cal_start, cal_finish;
double *cuda_A, *cuda_B, *cuda_C;
dim3 block_size(SHARED_BLOCK_SIZE, SHARED_BLOCK_SIZE, 1);
dim3 grid_size((MATRIX_ROWS+SHARED_BLOCK_SIZE-1)/SHARED_BLOCK_SIZE, (MATRIX_ROWS + SHARED_BLOCK_SIZE -1)/SHARED_BLOCK_SIZE);
start = clock();
cudaMalloc((void**)&cuda_A, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_B, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_C, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
cudaMemcpy(cuda_A, A, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_B, B, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyHostToDevice);
cal_start = clock();
MatrixMultSharedMemory<<<grid_size, block_size, 0>>>(cuda_A, cuda_B, cuda_C);
cudaMemcpy(C, cuda_C, sizeof(double)*MATRIX_ROWS*MATRIX_ROWS, cudaMemcpyDeviceToHost);
cal_finish = clock();
cudaFree(cuda_A);
cudaFree(cuda_B);
cudaFree(cuda_C);
finish = clock();
// for(int idx = 0; idx < MATRIX_ROWS; idx ++)
// {
// for(int jdx = 0; jdx <MATRIX_ROWS; jdx++)
// printf("%lf %lf ", C[idx * MATRIX_ROWS + jdx], C_cpu[idx * MATRIX_ROWS + jdx]);
// printf("\n");
// }
double total_err = 0;
for(int i = 0; i<MATRIX_ROWS*MATRIX_ROWS; i++)
{
total_err += fabs(C[i] - C_cpu[i]);
}
printf("Mode 4: Total error: %lf, GPU Calculation time: %lf, Total time: %lf \n", total_err, 1.0 * (cal_finish - cal_start) / CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult_gpu_transpose_B()
{
clock_t start, finish, cal_start, cal_finish;
double *cuda_A, *cuda_B, *cuda_C, *cuda_transB;
start = clock();
cudaMalloc((void**)&cuda_A, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_B, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_transB, sizeof(double) * MATRIX_ROWS * MATRIX_COLS);
cudaMalloc((void**)&cuda_C, sizeof(double) * MATRIX_ROWS * MATRIX_ROWS);
cudaMemcpy(cuda_A, A, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_B, B, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyHostToDevice);
TransposeMatrix<<<BLOCK_NUM, THREAD_NUM, 0>>>(cuda_B, cuda_transB);
cudaMemcpy(transB, cuda_transB, sizeof(double)*MATRIX_ROWS*MATRIX_COLS, cudaMemcpyDeviceToHost);
// for(int i = 0; i<MATRIX_COLS; i++)
// {
// for(int j = 0; j<MATRIX_ROWS; j++)
// printf("%lf ", B[i * MATRIX_ROWS + j]);
// printf("\n");
// }
//
// for(int i = 0; i<MATRIX_ROWS; i++)
// {
// for(int j = 0; j<MATRIX_COLS; j++)
// printf("%lf ", transB[i * MATRIX_COLS + j]);
// printf("\n");
// }
cal_start = clock();
MatrixMultTransposeB<<<BLOCK_NUM, THREAD_NUM, 0>>>(cuda_A, cuda_B, cuda_transB, cuda_C);
cudaMemcpy(C, cuda_C, sizeof(double)*MATRIX_ROWS*MATRIX_ROWS, cudaMemcpyDeviceToHost);
cal_finish = clock();
cudaFree(cuda_A);
cudaFree(cuda_B);
cudaFree(cuda_C);
finish = clock();
double total_err = 0;
for(int i = 0; i<MATRIX_ROWS*MATRIX_ROWS; i++)
{
total_err += fabs(C[i] - C_cpu[i]);
}
printf("Mode 2: Total error: %lf, GPU Calculation time: %lf, Total time: %lf \n", total_err, 1.0 * (cal_finish - cal_start) / CLOCKS_PER_SEC, 1.0 * (finish - start) / CLOCKS_PER_SEC);
}
void test_matrix_mult()
{
generate_rand_matrix();
// for(int idx = 0; idx < MATRIX_ROWS; idx ++)
// {
// for(int jdx = 0; jdx <MATRIX_COLS; jdx++)
// printf("%lf ", A[idx * MATRIX_COLS + jdx]);
// printf("\n");
// }
// printf(" \n");
// for(int idx = 0; idx < MATRIX_COLS; idx ++)
// {
// for(int jdx = 0; jdx < MATRIX_ROWS; jdx++)
// printf("%lf ", B[idx * MATRIX_ROWS + jdx]);
// printf("\n");
// }
// printf(" \n");
printf("Test Matrix Multiplication(A(%dx%d) * B(%dx%d)): \n", MATRIX_ROWS, MATRIX_COLS, MATRIX_COLS, MATRIX_ROWS);
test_matrix_mult_cpu();
test_matrix_mult_gpu_original();
test_matrix_mult_gpu_transpose_B();
test_matrix_mult_gpu_one_by_one();
test_matrix_mult_gpu_shared_memory();
}
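// ---------------------------------------------------------------------------
// Illustrative driver sketch (not part of the original file): the real entry
// point presumably lives elsewhere, so this stand-in only shows the intended
// call order.
// ---------------------------------------------------------------------------
static void run_matrix_mult_example()
{
  test_matrix_mult();        // CPU reference plus the four GPU variants, with timings
  cudaDeviceSynchronize();   // make sure all kernels and copies have finished
  cudaDeviceReset();
}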
|
74f4378d71278bb62ad1261f0dedbf463882c125.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/volumetric_upsampling.h"
#include "octnet/gpu/gpu.h"
__global__ void kernel_volumetric_nn_upsampling_cdhw(ot_data_t* out, int n_vxs, int n, int in_depth, int in_height, int in_width, int feature_size, int upsampling_factor, const ot_data_t* in) {
int out_depth = upsampling_factor * in_depth;
int out_height = upsampling_factor * in_height;
int out_width = upsampling_factor * in_width;
CUDA_KERNEL_LOOP(vx_idx, n_vxs) {
int n = vx_idx / (out_depth * out_height * out_width);
int ow = vx_idx % out_width;
int oh = ((vx_idx - ow) / out_width) % out_height;
int od = (((((vx_idx - ow) / out_width) - oh) / out_height) % out_depth);
int id = od / upsampling_factor;
int ih = oh / upsampling_factor;
int iw = ow / upsampling_factor;
for(int f = 0; f < feature_size; ++f) {
int in_idx = (((n * feature_size + f) * in_depth + id) * in_height + ih) * in_width + iw;
int out_idx = (((n * feature_size + f) * out_depth + od) * out_height + oh) * out_width + ow;
out[out_idx] = in[in_idx];
}
}
}
void volumetric_nn_upsampling_cdhw_gpu(const ot_data_t* in, int n, int in_depth, int in_height, int in_width, int feature_size, int upsampling_factor, ot_data_t* out) {
int out_depth = upsampling_factor * in_depth;
int out_height = upsampling_factor * in_height;
int out_width = upsampling_factor * in_width;
int n_vxs = n * out_depth * out_height * out_width;
hipLaunchKernelGGL(( kernel_volumetric_nn_upsampling_cdhw), dim3(GET_BLOCKS(n_vxs)), dim3(CUDA_NUM_THREADS), 0, 0,
out, n_vxs, n, in_depth, in_height, in_width, feature_size, upsampling_factor, in
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_volumetric_nn_upsampling_cdhw_bwd(ot_data_t* grad_in, int n_vxs, int n, int in_depth, int in_height, int in_width, int feature_size, int upsampling_factor, const ot_data_t* grad_out) {
int out_depth = upsampling_factor * in_depth;
int out_height = upsampling_factor * in_height;
int out_width = upsampling_factor * in_width;
CUDA_KERNEL_LOOP(vx_idx, n_vxs) {
int n = vx_idx / (in_depth * in_height * in_width);
int iw = vx_idx % in_width;
int ih = ((vx_idx - iw) / in_width) % in_height;
int id = (((((vx_idx - iw) / in_width) - ih) / in_height) % in_depth);
for(int f = 0; f < feature_size; ++f) {
int in_idx = (((n * feature_size + f) * in_depth + id) * in_height + ih) * in_width + iw;
grad_in[in_idx] = 0;
for(int d = 0; d < upsampling_factor; ++ d) {
for(int h = 0; h < upsampling_factor; ++ h) {
for(int w = 0; w < upsampling_factor; ++ w) {
int od = id * upsampling_factor + d;
int oh = ih * upsampling_factor + h;
int ow = iw * upsampling_factor + w;
int out_idx = (((n * feature_size + f) * out_depth + od) * out_height + oh) * out_width + ow;
grad_in[in_idx] += grad_out[out_idx];
}
}
}
}
}
}
void volumetric_nn_upsampling_cdhw_bwd_gpu(const ot_data_t* grad_out, int n, int in_depth, int in_height, int in_width, int feature_size, int upsampling_factor, ot_data_t* grad_in) {
int n_vxs = n * in_depth * in_height * in_width;
hipLaunchKernelGGL(( kernel_volumetric_nn_upsampling_cdhw_bwd), dim3(GET_BLOCKS(n_vxs)), dim3(CUDA_NUM_THREADS), 0, 0,
grad_in, n_vxs, n, in_depth, in_height, in_width, feature_size, upsampling_factor, grad_out
);
CUDA_POST_KERNEL_CHECK;
}
|
74f4378d71278bb62ad1261f0dedbf463882c125.cu
|
// Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/volumetric_upsampling.h"
#include "octnet/gpu/gpu.h"
__global__ void kernel_volumetric_nn_upsampling_cdhw(ot_data_t* out, int n_vxs, int n, int in_depth, int in_height, int in_width, int feature_size, int upsampling_factor, const ot_data_t* in) {
int out_depth = upsampling_factor * in_depth;
int out_height = upsampling_factor * in_height;
int out_width = upsampling_factor * in_width;
CUDA_KERNEL_LOOP(vx_idx, n_vxs) {
int n = vx_idx / (out_depth * out_height * out_width);
int ow = vx_idx % out_width;
int oh = ((vx_idx - ow) / out_width) % out_height;
int od = (((((vx_idx - ow) / out_width) - oh) / out_height) % out_depth);
int id = od / upsampling_factor;
int ih = oh / upsampling_factor;
int iw = ow / upsampling_factor;
for(int f = 0; f < feature_size; ++f) {
int in_idx = (((n * feature_size + f) * in_depth + id) * in_height + ih) * in_width + iw;
int out_idx = (((n * feature_size + f) * out_depth + od) * out_height + oh) * out_width + ow;
out[out_idx] = in[in_idx];
}
}
}
void volumetric_nn_upsampling_cdhw_gpu(const ot_data_t* in, int n, int in_depth, int in_height, int in_width, int feature_size, int upsampling_factor, ot_data_t* out) {
int out_depth = upsampling_factor * in_depth;
int out_height = upsampling_factor * in_height;
int out_width = upsampling_factor * in_width;
int n_vxs = n * out_depth * out_height * out_width;
kernel_volumetric_nn_upsampling_cdhw<<<GET_BLOCKS(n_vxs), CUDA_NUM_THREADS>>>(
out, n_vxs, n, in_depth, in_height, in_width, feature_size, upsampling_factor, in
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_volumetric_nn_upsampling_cdhw_bwd(ot_data_t* grad_in, int n_vxs, int n, int in_depth, int in_height, int in_width, int feature_size, int upsampling_factor, const ot_data_t* grad_out) {
int out_depth = upsampling_factor * in_depth;
int out_height = upsampling_factor * in_height;
int out_width = upsampling_factor * in_width;
CUDA_KERNEL_LOOP(vx_idx, n_vxs) {
int n = vx_idx / (in_depth * in_height * in_width);
int iw = vx_idx % in_width;
int ih = ((vx_idx - iw) / in_width) % in_height;
int id = (((((vx_idx - iw) / in_width) - ih) / in_height) % in_depth);
for(int f = 0; f < feature_size; ++f) {
int in_idx = (((n * feature_size + f) * in_depth + id) * in_height + ih) * in_width + iw;
grad_in[in_idx] = 0;
for(int d = 0; d < upsampling_factor; ++ d) {
for(int h = 0; h < upsampling_factor; ++ h) {
for(int w = 0; w < upsampling_factor; ++ w) {
int od = id * upsampling_factor + d;
int oh = ih * upsampling_factor + h;
int ow = iw * upsampling_factor + w;
int out_idx = (((n * feature_size + f) * out_depth + od) * out_height + oh) * out_width + ow;
grad_in[in_idx] += grad_out[out_idx];
}
}
}
}
}
}
void volumetric_nn_upsampling_cdhw_bwd_gpu(const ot_data_t* grad_out, int n, int in_depth, int in_height, int in_width, int feature_size, int upsampling_factor, ot_data_t* grad_in) {
int n_vxs = n * in_depth * in_height * in_width;
kernel_volumetric_nn_upsampling_cdhw_bwd<<<GET_BLOCKS(n_vxs), CUDA_NUM_THREADS>>>(
grad_in, n_vxs, n, in_depth, in_height, in_width, feature_size, upsampling_factor, grad_out
);
CUDA_POST_KERNEL_CHECK;
}
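// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): upsamples a dense
// NCDHW feature volume by a factor of 2. Buffer names and sizes are
// hypothetical; ot_data_t is whatever scalar type the OctNet headers define.
// ---------------------------------------------------------------------------
static void volumetric_nn_upsampling_example(const ot_data_t* d_in, ot_data_t* d_out) {
  const int n = 1, depth = 8, height = 8, width = 8, feature_size = 16, factor = 2;
  // d_in holds n*feature_size*depth*height*width values;
  // d_out must hold n*feature_size*(2*depth)*(2*height)*(2*width) values.
  volumetric_nn_upsampling_cdhw_gpu(d_in, n, depth, height, width, feature_size, factor, d_out);
}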
|
0d22d4ff42ce1fd0426e08e8b79f4507ac276e49.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "TRWP.h"
#include "commonCUDA.cuh"
#include "TRWP_soft.cuh"
#ifdef __cplusplus
extern "C" {
#endif
__device__ void DynamicProgrammingBack(const Param param,
const uint n_thread_a_tree,
const uint current_node_h,
const uint current_node_w,
const uint front_node_h,
const uint front_node_w,
const float* context,
const float* edge_weights,
const float* msg_edge_label,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights,
float* dmsg_update_shared,
float* msg_min_value_shared,
float* msg_edge_label_shared,
float* msg_edge_label_exp_shared) {
uint height = param.height, width = param.width;
uint n_disp = param.n_disp, n_trees = param.n_trees;
bool is_pass_l2r = param.is_pass_l2r;
float rho = param.rho;
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
uint n_disp_with_warp = (n_disp + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE;
uint max_parallel_disps = min(n_disp, blockDim.x / n_disp_with_warp);
uint n_iters = (n_disp + max_parallel_disps - 1) / max_parallel_disps;
bool enable_seg = (n_disp == 21);
uint id_base = tid / (n_trees * n_thread_a_tree);
uint unary_base = id_base * height * width * n_disp;
uint edge_base = id_base * height * width;
uint msg_edge_label_base = id_base * height * width * n_disp * n_disp;
uint current_d_base = threadIdx.x / n_disp_with_warp;
uint msg_index_offset = id_base * height * width + current_node_h * width + current_node_w;
uchar norm_index = msg_norm_index[msg_index_offset];
if (threadIdx.x < n_disp) {
uint current_d = threadIdx.x;
uint msg_offset = unary_base + current_node_h * width * n_disp + current_node_w * n_disp + current_d;
dmsg_update_shared[threadIdx.x] = dmsg[msg_offset];
}
__syncthreads();
// Back norm
uint current_d_4norm = threadIdx.x % n_disp_with_warp;
float gradient = 0;
// A patch: current_d_4norm above may exceed MAX_DISPARITY
if (current_d_4norm < MAX_DISPARITY) gradient = dmsg_update_shared[current_d_4norm];
__syncthreads();
float gradient_sum = sumMsg(n_disp, current_d_base, gradient);
if (threadIdx.x == 0) dmsg_update_shared[norm_index] -= gradient_sum;
__syncthreads();
uint offset_base = unary_base + front_node_h * width * n_disp + front_node_w * n_disp;
uint front_d = threadIdx.x % n_disp_with_warp;
uint unary_offset = offset_base + front_d;
for (uint iter = 0; iter < n_iters; ++iter) {
uint current_d = iter * max_parallel_disps + current_d_base;
bool is_valid_thread = (front_d < n_disp) && (current_d < n_disp);
bool enable_valid_assign = is_valid_thread && (threadIdx.x % n_disp_with_warp == 0);
uint lr_id = current_d_base * n_disp_with_warp + front_d;
uint msg_edge_label_loc = is_pass_l2r ? (front_d * n_disp + current_d) : (current_d * n_disp + front_d);
uint msg_edge_label_add = msg_edge_label_base + current_node_h * width * n_disp * n_disp
+ current_node_w * n_disp * n_disp + msg_edge_label_loc;
// Calculate p * (1 - msg_edge_label + msg)
if (is_valid_thread) msg_edge_label_shared[lr_id] = msg_edge_label[msg_edge_label_add];
__syncthreads();
// ==== BEGIN: from forward, re-calculate prob and msg_soft_sum
// Find the min value among front_d
float min_value = findMsgMin(n_disp, front_d, current_d_base, msg_edge_label_shared[lr_id]);
if (enable_valid_assign) msg_min_value_shared[current_d] = min_value;
__syncthreads();
// Subtract min_value from msg_edge_label
if (is_valid_thread)
msg_edge_label_exp_shared[lr_id] = __expf(-msg_edge_label_shared[lr_id] + msg_min_value_shared[current_d]);
__syncthreads();
// Soft message
float sum_exp = sumMsg(n_disp, current_d_base, msg_edge_label_exp_shared[lr_id]);
float prob = msg_edge_label_exp_shared[lr_id] / sum_exp;
float msg_soft = prob * msg_edge_label_shared[lr_id];
if (is_valid_thread) msg_edge_label_exp_shared[lr_id] = msg_soft;
__syncthreads();
// Sum soft message over front_d
float msg_soft_sum = sumMsg(n_disp, current_d_base, msg_edge_label_exp_shared[lr_id]);
if (enable_valid_assign) msg_min_value_shared[current_d] = msg_soft_sum;
__syncthreads();
// ==== END: From forward
if (is_valid_thread) {
// Calculate dmsg_edge_label
float dmsg_sum = dmsg_update_shared[current_d];
float msg_edge_label_one = msg_edge_label_shared[lr_id];
float dmsg_edge_label = dmsg_sum * prob * (1 - msg_edge_label_one + msg_soft_sum);
uint context_loc = 0;
if (enable_seg)
context_loc = min(current_d, front_d) * n_disp + max(current_d, front_d);
else
context_loc = std::abs(int(current_d) - int(front_d));
uint edge_weight_loc = edge_base + current_node_h * width + current_node_w;
atomicAdd(&dunary_update[unary_offset], dmsg_edge_label);
atomicAdd(&dmsg[unary_offset], rho * dmsg_edge_label);
atomicAdd(&dedge_weights[edge_weight_loc], context[context_loc] * dmsg_edge_label);
atomicAdd(&dcontext[context_loc], edge_weights[edge_weight_loc] * dmsg_edge_label);
}
__syncthreads();
}
}
__global__ void CostAggregateKernelSoftBack(const Param param,
const uint n_thread_required,
float* dcost_final_ptr,
float* dunary,
float* dmsg_ptr) {
// cost_final=unary+sum{msg_update}
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n_thread_required) return;
float dcost_final_value = dcost_final_ptr[tid];
dunary[tid] = dcost_final_value;
for (uint dir = 0; dir < param.n_dir; ++dir)
dmsg_ptr[dir * n_thread_required + tid] = dcost_final_value;
__syncthreads();
}
__global__ void UpdateUnaryKernelSoftBack(const Param param,
const uint n_thread_required,
float* dunary_update_ptr,
float* dunary_ptr,
float* dmsg_ptr) {
// unary_update=rho*(unary+sum{msg}-msg_dir)-msg_dir_inv
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*n_cv*h*w*n_disp
if (tid >= n_thread_required) return;
uint dir = param.dir, dir_inv = param.dir_inv, n_dir = param.n_dir;
float rho = param.rho;
float dunary_update_value = dunary_update_ptr[tid];
float dunary_update_value_rho = rho * dunary_update_value;
for (uint d = 0; d < n_dir; ++d)
atomicAdd(&dmsg_ptr[d * n_thread_required + tid], dunary_update_value_rho);
atomicAdd(&dunary_ptr[tid], dunary_update_value_rho);
atomicAdd(&dmsg_ptr[dir * n_thread_required + tid], -dunary_update_value_rho);
atomicAdd(&dmsg_ptr[dir_inv * n_thread_required + tid], -dunary_update_value);
__syncthreads();
}
__global__ void HorizontalKernelSoftBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const float* msg_edge_label,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
static __shared__ float msg_min_value_shared[MAX_DISPARITY];
static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK];
static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK];
msg_edge_label_shared[threadIdx.x] = 0;
msg_edge_label_exp_shared[threadIdx.x] = 0;
if (threadIdx.x < MAX_DISPARITY) {
msg_min_value_shared[threadIdx.x] = 0;
dmsg_update_shared[threadIdx.x] = 0;
}
__syncthreads();
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
if (tid >= n_thread_required) return;
uint width = param.width, n_trees = param.n_trees;
int w_step = param.w_step;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
int h_start = tree_id, w_start = (w_step > 0) ? 0 : (width - 1);
uint roll_step = width - 1;
// The front node is in accordance with forward pass, use + *_step
// msg_min_index(batch,n_cv,h,w,n_disp)
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_w && current_node_w < width &&
0 <= front_node_w && front_node_w < width)
DynamicProgrammingBack(param,
n_thread_a_tree,
current_node_h,
current_node_w,
front_node_h,
front_node_w,
context,
edge_weights,
msg_edge_label,
msg_norm_index,
dmsg,
dunary_update,
dcontext,
dedge_weights,
dmsg_update_shared,
msg_min_value_shared,
msg_edge_label_shared,
msg_edge_label_exp_shared);
__syncthreads();
}
}
__global__ void DiagonalKernelNarrowSoftBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const float* msg_edge_label,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
static __shared__ float msg_min_value_shared[MAX_DISPARITY];
static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK];
static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK];
msg_edge_label_shared[threadIdx.x] = 0;
msg_edge_label_exp_shared[threadIdx.x] = 0;
if (threadIdx.x < MAX_DISPARITY) {
msg_min_value_shared[threadIdx.x] = 0;
dmsg_update_shared[threadIdx.x] = 0;
}
__syncthreads();
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
if (tid >= n_thread_required) return;
uint height = param.height, width = param.width, n_trees = param.n_trees;
int h_step = param.h_step, w_step = param.w_step;
uint h_step_abs = std::abs(h_step);
uint tree_id = (tid / n_thread_a_tree) % n_trees;
int tree_id_shift = tree_id - (height - 1) * max(w_step, 0);
int common1 = tree_id_shift % h_step_abs;
float common2 = float(tree_id_shift) / float(h_step_abs); // This must be float NOT int, will affect ceilf and floorf
int h_start = 0, w_start = 0;
// Use a common mode to calculate start points for shortest chains, read my notes for clarity
if (w_step > 0) {
h_start = (h_step_abs - common1) % h_step_abs;
w_start = ceilf(common2);
} else {
h_start = common1;
w_start = floorf(common2);
}
if (h_step < 0) h_start = height - 1 - h_start;
uint roll_step = (height - 1) / h_step_abs;
// The front node is in accordance with forward pass, use + *_step
// msg_min_index(batch,n_cv,h,w,n_disp)
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start + i * h_step;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h + h_step;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_h && current_node_h < height &&
0 <= current_node_w && current_node_w < width &&
0 <= front_node_h && front_node_h < height &&
0 <= front_node_w && front_node_w < width)
DynamicProgrammingBack(param,
n_thread_a_tree,
current_node_h,
current_node_w,
front_node_h,
front_node_w,
context,
edge_weights,
msg_edge_label,
msg_norm_index,
dmsg,
dunary_update,
dcontext,
dedge_weights,
dmsg_update_shared,
msg_min_value_shared,
msg_edge_label_shared,
msg_edge_label_exp_shared);
__syncthreads();
}
}
__global__ void DiagonalKernelWideSoftBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const float* msg_edge_label,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
static __shared__ float msg_min_value_shared[MAX_DISPARITY];
static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK];
static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK];
msg_edge_label_shared[threadIdx.x] = 0;
msg_edge_label_exp_shared[threadIdx.x] = 0;
if (threadIdx.x < MAX_DISPARITY) {
msg_min_value_shared[threadIdx.x] = 0;
dmsg_update_shared[threadIdx.x] = 0;
}
__syncthreads();
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
if (tid >= n_thread_required) return;
uint height = param.height, width = param.width, n_trees = param.n_trees;
int h_step = param.h_step, w_step = param.w_step;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
int tree_id_shift = tree_id - (height - 1) * max(w_step, 0);
uint h_step_abs = std::abs(h_step), roll_step = (height - 1) / h_step_abs;
int h_start = (h_step > 0) ? 0 : (height - 1), w_start = tree_id_shift;
// The front node is in accordance with forward pass, use + *_step
// msg_min_index(batch,n_cv,h,w,n_disp)
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start + i * h_step;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h + h_step;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_h && current_node_h < height &&
0 <= current_node_w && current_node_w < width &&
0 <= front_node_h && front_node_h < height &&
0 <= front_node_w && front_node_w < width)
DynamicProgrammingBack(param,
n_thread_a_tree,
current_node_h,
current_node_w,
front_node_h,
front_node_w,
context,
edge_weights,
msg_edge_label,
msg_norm_index,
dmsg,
dunary_update,
dcontext,
dedge_weights,
dmsg_update_shared,
msg_min_value_shared,
msg_edge_label_shared,
msg_edge_label_exp_shared);
__syncthreads();
}
}
void BackwardCUDASoft(const float rho,
const at::Tensor dcost_final,
const at::Tensor context,
const at::Tensor edge_weights,
const at::Tensor msg_edge_label,
const at::Tensor msg_norm_index,
at::Tensor dunary,
at::Tensor dcontext,
at::Tensor dedge_weights,
at::Tensor dmsg,
at::Tensor dunary_update) {
const uint n_iter = msg_edge_label.size(0);
const uint n_dir = msg_edge_label.size(1);
const uint batch = msg_edge_label.size(2);
const uint n_cv = msg_edge_label.size(3);
const uint height = msg_edge_label.size(4);
const uint width = msg_edge_label.size(5);
const uint n_disp = msg_edge_label.size(6);
float* dcost_final_ptr = dcost_final.data<float>();
float* context_ptr = context.data<float>();
float* edge_weight_ptr = edge_weights.data<float>();
float* msg_edge_label_ptr = msg_edge_label.data<float>(); // (n_iter,n_dir,batch,n_cv,h,w,n_disp,n_disp)
uchar* msg_norm_index_ptr = msg_norm_index.data<uchar>(); // (n_iter,n_dir,batch,n_cv,h,w)
float* dunary_ptr = dunary.data<float>(); // (batch,n_cv,h,w,n_disp)
float* dcontext_ptr = dcontext.data<float>();
float* dedge_weight_ptr = dedge_weights.data<float>();
float* dmsg_ptr = dmsg.data<float>();
float* dunary_update_ptr = dunary_update.data<float>();
uint n_disp_with_warp = GetNumThreadATree(n_disp, WARP_SIZE);
uint n_thread_a_tree = min(n_disp, MAX_THREADS_PER_BLOCK / n_disp_with_warp) * n_disp_with_warp;
bool is_backward = true, is_training = true;
std::vector<float*> dmsg_address(n_dir), edge_weight_address(n_dir);
std::vector<float*> dedge_weight_address(n_dir), msg_edge_label_address(n_dir);
std::vector<uchar*> msg_norm_index_address(n_dir);
std::vector<Param> param_list;
uint msg_min_size = batch * n_cv * height * width * n_disp;
uint msg_norm_size = msg_min_size / n_disp;
uint msg_edge_label_size = n_dir * msg_min_size * n_disp;
uint msg_norm_index_size = n_dir * msg_norm_size;
uint n_thread_unary = min(MAX_THREADS_PER_BLOCK, msg_min_size);
uint n_block_unary = (msg_min_size + n_thread_unary - 1) / n_thread_unary;
for (int dir = 0; dir < n_dir; ++dir) {
edge_weight_address[dir] = edge_weight_ptr + dir * msg_norm_size;
dedge_weight_address[dir] = dedge_weight_ptr + dir * msg_norm_size;
dmsg_address[dir] = dmsg_ptr + dir * msg_min_size;
Param param(n_dir, batch, n_cv, height, width, n_disp, dir, rho, is_backward, is_training);
UpdateParam(¶m);
param_list.push_back(param);
}
hipLaunchKernelGGL(( CostAggregateKernelSoftBack), dim3(n_block_unary), dim3(n_thread_unary), 0, 0, param_list[0],
msg_min_size,
dcost_final_ptr,
dunary_ptr,
dmsg_ptr);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
for (int iter = n_iter - 1; iter >= 0; --iter) {
for (int dir = n_dir - 1; dir >= 0; --dir) {
msg_edge_label_address[dir] = msg_edge_label_ptr + iter * msg_edge_label_size + dir * msg_edge_label_size / n_dir;
msg_norm_index_address[dir] = msg_norm_index_ptr + iter * msg_norm_index_size + dir * msg_norm_size;
uint n_threads = batch * n_cv * param_list[dir].n_trees * n_thread_a_tree;
uint n_blocks = GetNumBlock(n_threads, n_thread_a_tree);
// Diagonal
if (4 <= dir) {
uint h_step_abs = std::abs(param_list[dir].h_step);
uint w_step_abs = std::abs(param_list[dir].w_step);
if (h_step_abs > w_step_abs)
hipLaunchKernelGGL(( DiagonalKernelNarrowSoftBack), dim3(n_blocks), dim3(n_thread_a_tree), 0, 0, param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_edge_label_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
else
hipLaunchKernelGGL(( DiagonalKernelWideSoftBack), dim3(n_blocks), dim3(n_thread_a_tree), 0, 0, param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_edge_label_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
}
// Vertical
if ((2 <= dir) && (dir < 4))
hipLaunchKernelGGL(( DiagonalKernelWideSoftBack), dim3(n_blocks), dim3(n_thread_a_tree), 0, 0, param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_edge_label_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
// Horizontal
if (dir < 2)
hipLaunchKernelGGL(( HorizontalKernelSoftBack), dim3(n_blocks), dim3(n_thread_a_tree), 0, 0, param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_edge_label_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
hipLaunchKernelGGL(( UpdateUnaryKernelSoftBack), dim3(n_block_unary), dim3(n_thread_unary), 0, 0, param_list[dir],
msg_min_size,
dunary_update_ptr,
dunary_ptr,
dmsg_ptr);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
hipMemset(dunary_update_ptr, 0, msg_min_size * sizeof(float));
hipMemset(dmsg_address[dir], 0, msg_min_size * sizeof(float));
}
}
for (uint dir = 0; dir < n_dir; ++dir) {
if (dmsg_address[dir] != nullptr) dmsg_address[dir] = nullptr;
if (msg_edge_label_address[dir] != nullptr) msg_edge_label_address[dir] = nullptr;
if (msg_norm_index_address[dir] != nullptr) msg_norm_index_address[dir] = nullptr;
if (edge_weight_address[dir] != nullptr) edge_weight_address[dir] = nullptr;
if (dedge_weight_address[dir] != nullptr) dedge_weight_address[dir] = nullptr;
}
}
#ifdef __cplusplus
}
#endif
|
0d22d4ff42ce1fd0426e08e8b79f4507ac276e49.cu
|
#include "TRWP.h"
#include "commonCUDA.cuh"
#include "TRWP_soft.cuh"
#ifdef __cplusplus
extern "C" {
#endif
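// DynamicProgrammingBack: backward step for one edge of a tree. It loads the
// incoming message gradient for the current node into shared memory, undoes
// the forward normalization, re-computes the forward soft-min probabilities
// from msg_edge_label, and scatters the resulting gradients into
// dunary_update, dmsg, dcontext and dedge_weights via atomicAdd.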
__device__ void DynamicProgrammingBack(const Param param,
const uint n_thread_a_tree,
const uint current_node_h,
const uint current_node_w,
const uint front_node_h,
const uint front_node_w,
const float* context,
const float* edge_weights,
const float* msg_edge_label,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights,
float* dmsg_update_shared,
float* msg_min_value_shared,
float* msg_edge_label_shared,
float* msg_edge_label_exp_shared) {
uint height = param.height, width = param.width;
uint n_disp = param.n_disp, n_trees = param.n_trees;
bool is_pass_l2r = param.is_pass_l2r;
float rho = param.rho;
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
uint n_disp_with_warp = (n_disp + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE;
uint max_parallel_disps = min(n_disp, blockDim.x / n_disp_with_warp);
uint n_iters = (n_disp + max_parallel_disps - 1) / max_parallel_disps;
bool enable_seg = (n_disp == 21);
uint id_base = tid / (n_trees * n_thread_a_tree);
uint unary_base = id_base * height * width * n_disp;
uint edge_base = id_base * height * width;
uint msg_edge_label_base = id_base * height * width * n_disp * n_disp;
uint current_d_base = threadIdx.x / n_disp_with_warp;
uint msg_index_offset = id_base * height * width + current_node_h * width + current_node_w;
uchar norm_index = msg_norm_index[msg_index_offset];
if (threadIdx.x < n_disp) {
uint current_d = threadIdx.x;
uint msg_offset = unary_base + current_node_h * width * n_disp + current_node_w * n_disp + current_d;
dmsg_update_shared[threadIdx.x] = dmsg[msg_offset];
}
__syncthreads();
// Back norm
uint current_d_4norm = threadIdx.x % n_disp_with_warp;
float gradient = 0;
// A patch: current_d_4norm above may exceed MAX_DISPARITY
if (current_d_4norm < MAX_DISPARITY) gradient = dmsg_update_shared[current_d_4norm];
__syncthreads();
float gradient_sum = sumMsg(n_disp, current_d_base, gradient);
if (threadIdx.x == 0) dmsg_update_shared[norm_index] -= gradient_sum;
__syncthreads();
uint offset_base = unary_base + front_node_h * width * n_disp + front_node_w * n_disp;
uint front_d = threadIdx.x % n_disp_with_warp;
uint unary_offset = offset_base + front_d;
for (uint iter = 0; iter < n_iters; ++iter) {
uint current_d = iter * max_parallel_disps + current_d_base;
bool is_valid_thread = (front_d < n_disp) && (current_d < n_disp);
bool enable_valid_assign = is_valid_thread && (threadIdx.x % n_disp_with_warp == 0);
uint lr_id = current_d_base * n_disp_with_warp + front_d;
uint msg_edge_label_loc = is_pass_l2r ? (front_d * n_disp + current_d) : (current_d * n_disp + front_d);
uint msg_edge_label_add = msg_edge_label_base + current_node_h * width * n_disp * n_disp
+ current_node_w * n_disp * n_disp + msg_edge_label_loc;
// Calculate p * (1 - msg_edge_label + msg)
if (is_valid_thread) msg_edge_label_shared[lr_id] = msg_edge_label[msg_edge_label_add];
__syncthreads();
// ==== BEGIN: from forward, re-calculate prob and msg_soft_sum
// Find the min value among front_d
float min_value = findMsgMin(n_disp, front_d, current_d_base, msg_edge_label_shared[lr_id]);
if (enable_valid_assign) msg_min_value_shared[current_d] = min_value;
__syncthreads();
// Subtract min_value from msg_edge_label
if (is_valid_thread)
msg_edge_label_exp_shared[lr_id] = __expf(-msg_edge_label_shared[lr_id] + msg_min_value_shared[current_d]);
__syncthreads();
// Soft message
float sum_exp = sumMsg(n_disp, current_d_base, msg_edge_label_exp_shared[lr_id]);
float prob = msg_edge_label_exp_shared[lr_id] / sum_exp;
float msg_soft = prob * msg_edge_label_shared[lr_id];
if (is_valid_thread) msg_edge_label_exp_shared[lr_id] = msg_soft;
__syncthreads();
// Sum soft message over front_d
float msg_soft_sum = sumMsg(n_disp, current_d_base, msg_edge_label_exp_shared[lr_id]);
if (enable_valid_assign) msg_min_value_shared[current_d] = msg_soft_sum;
__syncthreads();
// ==== END: From forward
if (is_valid_thread) {
// Calculate dmsg_edge_label
float dmsg_sum = dmsg_update_shared[current_d];
float msg_edge_label_one = msg_edge_label_shared[lr_id];
float dmsg_edge_label = dmsg_sum * prob * (1 - msg_edge_label_one + msg_soft_sum);
uint context_loc = 0;
if (enable_seg)
context_loc = min(current_d, front_d) * n_disp + max(current_d, front_d);
else
context_loc = std::abs(int(current_d) - int(front_d));
uint edge_weight_loc = edge_base + current_node_h * width + current_node_w;
atomicAdd(&dunary_update[unary_offset], dmsg_edge_label);
atomicAdd(&dmsg[unary_offset], rho * dmsg_edge_label);
atomicAdd(&dedge_weights[edge_weight_loc], context[context_loc] * dmsg_edge_label);
atomicAdd(&dcontext[context_loc], edge_weights[edge_weight_loc] * dmsg_edge_label);
}
__syncthreads();
}
}
__global__ void CostAggregateKernelSoftBack(const Param param,
const uint n_thread_required,
float* dcost_final_ptr,
float* dunary,
float* dmsg_ptr) {
// cost_final=unary+sum{msg_update}
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n_thread_required) return;
float dcost_final_value = dcost_final_ptr[tid];
dunary[tid] = dcost_final_value;
for (uint dir = 0; dir < param.n_dir; ++dir)
dmsg_ptr[dir * n_thread_required + tid] = dcost_final_value;
__syncthreads();
}
__global__ void UpdateUnaryKernelSoftBack(const Param param,
const uint n_thread_required,
float* dunary_update_ptr,
float* dunary_ptr,
float* dmsg_ptr) {
// unary_update=rho*(unary+sum{msg}-msg_dir)-msg_dir_inv
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*n_cv*h*w*n_disp
if (tid >= n_thread_required) return;
uint dir = param.dir, dir_inv = param.dir_inv, n_dir = param.n_dir;
float rho = param.rho;
float dunary_update_value = dunary_update_ptr[tid];
float dunary_update_value_rho = rho * dunary_update_value;
for (uint d = 0; d < n_dir; ++d)
atomicAdd(&dmsg_ptr[d * n_thread_required + tid], dunary_update_value_rho);
atomicAdd(&dunary_ptr[tid], dunary_update_value_rho);
atomicAdd(&dmsg_ptr[dir * n_thread_required + tid], -dunary_update_value_rho);
atomicAdd(&dmsg_ptr[dir_inv * n_thread_required + tid], -dunary_update_value);
__syncthreads();
}
__global__ void HorizontalKernelSoftBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const float* msg_edge_label,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
static __shared__ float msg_min_value_shared[MAX_DISPARITY];
static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK];
static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK];
msg_edge_label_shared[threadIdx.x] = 0;
msg_edge_label_exp_shared[threadIdx.x] = 0;
if (threadIdx.x < MAX_DISPARITY) {
msg_min_value_shared[threadIdx.x] = 0;
dmsg_update_shared[threadIdx.x] = 0;
}
__syncthreads();
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
if (tid >= n_thread_required) return;
uint width = param.width, n_trees = param.n_trees;
int w_step = param.w_step;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
int h_start = tree_id, w_start = (w_step > 0) ? 0 : (width - 1);
uint roll_step = width - 1;
// The front node is in accordance with forward pass, use + *_step
// msg_min_index(batch,n_cv,h,w,n_disp)
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_w && current_node_w < width &&
0 <= front_node_w && front_node_w < width)
DynamicProgrammingBack(param,
n_thread_a_tree,
current_node_h,
current_node_w,
front_node_h,
front_node_w,
context,
edge_weights,
msg_edge_label,
msg_norm_index,
dmsg,
dunary_update,
dcontext,
dedge_weights,
dmsg_update_shared,
msg_min_value_shared,
msg_edge_label_shared,
msg_edge_label_exp_shared);
__syncthreads();
}
}
__global__ void DiagonalKernelNarrowSoftBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const float* msg_edge_label,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
static __shared__ float msg_min_value_shared[MAX_DISPARITY];
static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK];
static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK];
msg_edge_label_shared[threadIdx.x] = 0;
msg_edge_label_exp_shared[threadIdx.x] = 0;
if (threadIdx.x < MAX_DISPARITY) {
msg_min_value_shared[threadIdx.x] = 0;
dmsg_update_shared[threadIdx.x] = 0;
}
__syncthreads();
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
if (tid >= n_thread_required) return;
uint height = param.height, width = param.width, n_trees = param.n_trees;
int h_step = param.h_step, w_step = param.w_step;
uint h_step_abs = std::abs(h_step);
uint tree_id = (tid / n_thread_a_tree) % n_trees;
int tree_id_shift = tree_id - (height - 1) * max(w_step, 0);
int common1 = tree_id_shift % h_step_abs;
float common2 = float(tree_id_shift) / float(h_step_abs); // This must be float NOT int, will affect ceilf and floorf
int h_start = 0, w_start = 0;
// Compute each chain's start point with a common modular formula (this also covers the shortened chains); see the accompanying notes for the derivation
if (w_step > 0) {
h_start = (h_step_abs - common1) % h_step_abs;
w_start = ceilf(common2);
} else {
h_start = common1;
w_start = floorf(common2);
}
if (h_step < 0) h_start = height - 1 - h_start;
uint roll_step = (height - 1) / h_step_abs;
// The front node follows the forward-pass convention: it is offset by + *_step.
// msg_min_index shape: (batch, n_cv, h, w, n_disp)
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start + i * h_step;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h + h_step;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_h && current_node_h < height &&
0 <= current_node_w && current_node_w < width &&
0 <= front_node_h && front_node_h < height &&
0 <= front_node_w && front_node_w < width)
DynamicProgrammingBack(param,
n_thread_a_tree,
current_node_h,
current_node_w,
front_node_h,
front_node_w,
context,
edge_weights,
msg_edge_label,
msg_norm_index,
dmsg,
dunary_update,
dcontext,
dedge_weights,
dmsg_update_shared,
msg_min_value_shared,
msg_edge_label_shared,
msg_edge_label_exp_shared);
__syncthreads();
}
}
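// Backward pass over "wide" diagonal trees (|w_step| >= |h_step|); BackwardCUDASoft also
// reuses this kernel for the two vertical directions. Each tree starts at column
// tree_id_shift, on the top or bottom row depending on the sign of h_step.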
__global__ void DiagonalKernelWideSoftBack(const Param param,
const uint n_thread_required,
const uint n_thread_a_tree,
const float* context,
const float* edge_weights,
const float* msg_edge_label,
const uchar* msg_norm_index,
float* dmsg,
float* dunary_update,
float* dcontext,
float* dedge_weights) {
static __shared__ float dmsg_update_shared[MAX_DISPARITY];
static __shared__ float msg_min_value_shared[MAX_DISPARITY];
static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK];
static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK];
msg_edge_label_shared[threadIdx.x] = 0;
msg_edge_label_exp_shared[threadIdx.x] = 0;
if (threadIdx.x < MAX_DISPARITY) {
msg_min_value_shared[threadIdx.x] = 0;
dmsg_update_shared[threadIdx.x] = 0;
}
__syncthreads();
uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree
if (tid >= n_thread_required) return;
uint height = param.height, width = param.width, n_trees = param.n_trees;
int h_step = param.h_step, w_step = param.w_step;
uint tree_id = (tid / n_thread_a_tree) % n_trees;
int tree_id_shift = tree_id - (height - 1) * max(w_step, 0);
uint h_step_abs = std::abs(h_step), roll_step = (height - 1) / h_step_abs;
int h_start = (h_step > 0) ? 0 : (height - 1), w_start = tree_id_shift;
// The front node follows the forward-pass convention: it is offset by + *_step.
// msg_min_index shape: (batch, n_cv, h, w, n_disp)
for (uint i = 0; i <= roll_step; ++i) {
int current_node_h = h_start + i * h_step;
int current_node_w = w_start + i * w_step;
int front_node_h = current_node_h + h_step;
int front_node_w = current_node_w + w_step;
if (0 <= current_node_h && current_node_h < height &&
0 <= current_node_w && current_node_w < width &&
0 <= front_node_h && front_node_h < height &&
0 <= front_node_w && front_node_w < width)
DynamicProgrammingBack(param,
n_thread_a_tree,
current_node_h,
current_node_w,
front_node_h,
front_node_w,
context,
edge_weights,
msg_edge_label,
msg_norm_index,
dmsg,
dunary_update,
dcontext,
dedge_weights,
dmsg_update_shared,
msg_min_value_shared,
msg_edge_label_shared,
msg_edge_label_exp_shared);
__syncthreads();
}
}
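// Host-side driver of the soft backward pass: first scatter dcost_final into dunary and
// the per-direction dmsg buffers, then sweep iterations and directions in reverse order,
// launching the matching tree kernel for each direction, folding dunary_update back into
// dunary/dmsg, and clearing the scratch buffers before the next direction.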
void BackwardCUDASoft(const float rho,
const at::Tensor dcost_final,
const at::Tensor context,
const at::Tensor edge_weights,
const at::Tensor msg_edge_label,
const at::Tensor msg_norm_index,
at::Tensor dunary,
at::Tensor dcontext,
at::Tensor dedge_weights,
at::Tensor dmsg,
at::Tensor dunary_update) {
const uint n_iter = msg_edge_label.size(0);
const uint n_dir = msg_edge_label.size(1);
const uint batch = msg_edge_label.size(2);
const uint n_cv = msg_edge_label.size(3);
const uint height = msg_edge_label.size(4);
const uint width = msg_edge_label.size(5);
const uint n_disp = msg_edge_label.size(6);
float* dcost_final_ptr = dcost_final.data<float>();
float* context_ptr = context.data<float>();
float* edge_weight_ptr = edge_weights.data<float>();
float* msg_edge_label_ptr = msg_edge_label.data<float>(); // (n_iter,n_dir,batch,n_cv,h,w,n_disp,n_disp)
uchar* msg_norm_index_ptr = msg_norm_index.data<uchar>(); // (n_iter,n_dir,batch,n_cv,h,w)
float* dunary_ptr = dunary.data<float>(); // (batch,n_cv,h,w,n_disp)
float* dcontext_ptr = dcontext.data<float>();
float* dedge_weight_ptr = dedge_weights.data<float>();
float* dmsg_ptr = dmsg.data<float>();
float* dunary_update_ptr = dunary_update.data<float>();
uint n_disp_with_warp = GetNumThreadATree(n_disp, WARP_SIZE);
uint n_thread_a_tree = min(n_disp, MAX_THREADS_PER_BLOCK / n_disp_with_warp) * n_disp_with_warp;
bool is_backward = true, is_training = true;
std::vector<float*> dmsg_address(n_dir), edge_weight_address(n_dir);
std::vector<float*> dedge_weight_address(n_dir), msg_edge_label_address(n_dir);
std::vector<uchar*> msg_norm_index_address(n_dir);
std::vector<Param> param_list;
uint msg_min_size = batch * n_cv * height * width * n_disp;
uint msg_norm_size = msg_min_size / n_disp;
uint msg_edge_label_size = n_dir * msg_min_size * n_disp;
uint msg_norm_index_size = n_dir * msg_norm_size;
uint n_thread_unary = min(MAX_THREADS_PER_BLOCK, msg_min_size);
uint n_block_unary = (msg_min_size + n_thread_unary - 1) / n_thread_unary;
for (int dir = 0; dir < n_dir; ++dir) {
edge_weight_address[dir] = edge_weight_ptr + dir * msg_norm_size;
dedge_weight_address[dir] = dedge_weight_ptr + dir * msg_norm_size;
dmsg_address[dir] = dmsg_ptr + dir * msg_min_size;
Param param(n_dir, batch, n_cv, height, width, n_disp, dir, rho, is_backward, is_training);
UpdateParam(¶m);
param_list.push_back(param);
}
CostAggregateKernelSoftBack<<<n_block_unary, n_thread_unary>>>(param_list[0],
msg_min_size,
dcost_final_ptr,
dunary_ptr,
dmsg_ptr);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
for (int iter = n_iter - 1; iter >= 0; --iter) {
for (int dir = n_dir - 1; dir >= 0; --dir) {
msg_edge_label_address[dir] = msg_edge_label_ptr + iter * msg_edge_label_size + dir * msg_edge_label_size / n_dir;
msg_norm_index_address[dir] = msg_norm_index_ptr + iter * msg_norm_index_size + dir * msg_norm_size;
uint n_threads = batch * n_cv * param_list[dir].n_trees * n_thread_a_tree;
uint n_blocks = GetNumBlock(n_threads, n_thread_a_tree);
// Diagonal
if (4 <= dir) {
uint h_step_abs = std::abs(param_list[dir].h_step);
uint w_step_abs = std::abs(param_list[dir].w_step);
if (h_step_abs > w_step_abs)
DiagonalKernelNarrowSoftBack<<<n_blocks, n_thread_a_tree>>>(param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_edge_label_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
else
DiagonalKernelWideSoftBack<<<n_blocks, n_thread_a_tree>>>(param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_edge_label_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
}
// Vertical
if ((2 <= dir) && (dir < 4))
DiagonalKernelWideSoftBack<<<n_blocks, n_thread_a_tree>>>(param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_edge_label_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
// Horizontal
if (dir < 2)
HorizontalKernelSoftBack<<<n_blocks, n_thread_a_tree>>>(param_list[dir],
n_threads,
n_thread_a_tree,
context_ptr,
edge_weight_address[dir],
msg_edge_label_address[dir],
msg_norm_index_address[dir],
dmsg_address[dir],
dunary_update_ptr,
dcontext_ptr,
dedge_weight_address[dir]);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
UpdateUnaryKernelSoftBack<<<n_block_unary, n_thread_unary>>>(param_list[dir],
msg_min_size,
dunary_update_ptr,
dunary_ptr,
dmsg_ptr);
#ifdef CUDA_ERROR_CHECK
CUDAErrorCheck();
#endif
cudaMemset(dunary_update_ptr, 0, msg_min_size * sizeof(float));
cudaMemset(dmsg_address[dir], 0, msg_min_size * sizeof(float));
}
}
for (uint dir = 0; dir < n_dir; ++dir) {
if (dmsg_address[dir] != nullptr) dmsg_address[dir] = nullptr;
if (msg_edge_label_address[dir] != nullptr) msg_edge_label_address[dir] = nullptr;
if (msg_norm_index_address[dir] != nullptr) msg_norm_index_address[dir] = nullptr;
if (edge_weight_address[dir] != nullptr) edge_weight_address[dir] = nullptr;
if (dedge_weight_address[dir] != nullptr) dedge_weight_address[dir] = nullptr;
}
}
#ifdef __cplusplus
}
#endif
|
51e110da8cab099d5d13f6638abb06e8fe485074.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/array.h>
#include <af/dim4.hpp>
#include <af/defines.h>
#include <Array.hpp>
#include <diagonal.hpp>
#include <math.hpp>
#include <err_cuda.hpp>
#include <kernel/diagonal.hpp>
namespace cuda
{
template<typename T>
Array<T> diagCreate(const Array<T> &in, const int num)
{
int size = in.dims()[0] + std::abs(num);
int batch = in.dims()[1];
Array<T> out = createEmptyArray<T>(dim4(size, size, batch));
kernel::diagCreate<T>(out, in, num);
return out;
}
template<typename T>
Array<T> diagExtract(const Array<T> &in, const int num)
{
const dim_t *idims = in.dims().get();
dim_t size = ::max(idims[0], idims[1]) - std::abs(num);
Array<T> out = createEmptyArray<T>(dim4(size, 1, idims[2], idims[3]));
kernel::diagExtract<T>(out, in, num);
return out;
}
#define INSTANTIATE_DIAGONAL(T) \
template Array<T> diagExtract<T> (const Array<T> &in, const int num); \
template Array<T> diagCreate <T> (const Array<T> &in, const int num);
INSTANTIATE_DIAGONAL(float)
INSTANTIATE_DIAGONAL(double)
INSTANTIATE_DIAGONAL(cfloat)
INSTANTIATE_DIAGONAL(cdouble)
INSTANTIATE_DIAGONAL(int)
INSTANTIATE_DIAGONAL(uint)
INSTANTIATE_DIAGONAL(intl)
INSTANTIATE_DIAGONAL(uintl)
INSTANTIATE_DIAGONAL(char)
INSTANTIATE_DIAGONAL(uchar)
INSTANTIATE_DIAGONAL(short)
INSTANTIATE_DIAGONAL(ushort)
}
|
51e110da8cab099d5d13f6638abb06e8fe485074.cu
|
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/array.h>
#include <af/dim4.hpp>
#include <af/defines.h>
#include <Array.hpp>
#include <diagonal.hpp>
#include <math.hpp>
#include <err_cuda.hpp>
#include <kernel/diagonal.hpp>
namespace cuda
{
template<typename T>
Array<T> diagCreate(const Array<T> &in, const int num)
{
int size = in.dims()[0] + std::abs(num);
int batch = in.dims()[1];
Array<T> out = createEmptyArray<T>(dim4(size, size, batch));
kernel::diagCreate<T>(out, in, num);
return out;
}
template<typename T>
Array<T> diagExtract(const Array<T> &in, const int num)
{
const dim_t *idims = in.dims().get();
dim_t size = std::max(idims[0], idims[1]) - std::abs(num);
Array<T> out = createEmptyArray<T>(dim4(size, 1, idims[2], idims[3]));
kernel::diagExtract<T>(out, in, num);
return out;
}
#define INSTANTIATE_DIAGONAL(T) \
template Array<T> diagExtract<T> (const Array<T> &in, const int num); \
template Array<T> diagCreate <T> (const Array<T> &in, const int num);
INSTANTIATE_DIAGONAL(float)
INSTANTIATE_DIAGONAL(double)
INSTANTIATE_DIAGONAL(cfloat)
INSTANTIATE_DIAGONAL(cdouble)
INSTANTIATE_DIAGONAL(int)
INSTANTIATE_DIAGONAL(uint)
INSTANTIATE_DIAGONAL(intl)
INSTANTIATE_DIAGONAL(uintl)
INSTANTIATE_DIAGONAL(char)
INSTANTIATE_DIAGONAL(uchar)
INSTANTIATE_DIAGONAL(short)
INSTANTIATE_DIAGONAL(ushort)
}
|
868631e4f7ee6f2267241c98ca949c4d3d3e6cf0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// patch sample learning
// for SPN
// Sifei Liu
// ------------------------------------------------------------------
#include <cfloat>
#include <stdio.h>
#include "caffe/layers/roi_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIForward(const int nthreads, const int bottom_count, const Dtype* bottom_data,
const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the top
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
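// Each ROI holds 5 values; only [0]=batch index, [1]=start_w and [2]=start_h are used
// here (stored 1-based, hence the -1 below).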
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0]-1;
int roi_start_w = bottom_rois[1]-1;
int roi_start_h = bottom_rois[2]-1;
int bottom_index = roi_batch_ind * channels * height * width + c * height * width + (roi_start_h + ph) * width + (roi_start_w + pw);
// DEBUG
// if (bottom_index >= bottom_count) {
// printf("%d, %d, %d, %d\n", roi_batch_ind, c, (roi_start_h + ph), (roi_start_w + pw));
// }
top_data[index] = bottom_data[bottom_index];
argmax_data[bottom_index] = 1;
}
}
template <typename Dtype>
void ROILayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = mask_.mutable_gpu_data();
int count = top[0]->count();
int bottom_count = bottom[0]->count();
// LOG(INFO) << "bottom_count: " << bottom_count << " argmax_data_count: " << mask_.count();
caffe_gpu_set(bottom_count, 0, argmax_data);
// NOLINT_NEXT_LINE(whitespace/operators)
// LOG(INFO) << "bottom_count: " << bottom_count;
hipLaunchKernelGGL(( ROIForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_count, bottom_data, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the top
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0]-1;
int roi_start_w = bottom_rois[1]-1;
int roi_start_h = bottom_rois[2]-1;
int bottom_index = roi_batch_ind * channels * height * width + c * height * width + (roi_start_h + ph) * width + (roi_start_w + pw);
if (argmax_data[bottom_index] != 0) {
bottom_diff[bottom_index] += top_diff[index];
}
}
}
template <typename Dtype>
void ROILayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
const int count = top[0]->count();
const int* argmax_data = mask_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROILayer);
} // namespace caffe
|
868631e4f7ee6f2267241c98ca949c4d3d3e6cf0.cu
|
// ------------------------------------------------------------------
// patch sample learning
// for SPN
// Sifei Liu
// ------------------------------------------------------------------
#include <cfloat>
#include <stdio.h>
#include "caffe/layers/roi_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIForward(const int nthreads, const int bottom_count, const Dtype* bottom_data,
const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the top
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
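// Each ROI holds 5 values; only [0]=batch index, [1]=start_w and [2]=start_h are used
// here (stored 1-based, hence the -1 below).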
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0]-1;
int roi_start_w = bottom_rois[1]-1;
int roi_start_h = bottom_rois[2]-1;
int bottom_index = roi_batch_ind * channels * height * width + c * height * width + (roi_start_h + ph) * width + (roi_start_w + pw);
// DEBUG
// if (bottom_index >= bottom_count) {
// printf("%d, %d, %d, %d\n", roi_batch_ind, c, (roi_start_h + ph), (roi_start_w + pw));
// }
top_data[index] = bottom_data[bottom_index];
argmax_data[bottom_index] = 1;
}
}
template <typename Dtype>
void ROILayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = mask_.mutable_gpu_data();
int count = top[0]->count();
int bottom_count = bottom[0]->count();
// LOG(INFO) << "bottom_count: " << bottom_count << " argmax_data_count: " << mask_.count();
caffe_gpu_set(bottom_count, 0, argmax_data);
// NOLINT_NEXT_LINE(whitespace/operators)
// LOG(INFO) << "bottom_count: " << bottom_count;
ROIForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_count, bottom_data, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the top
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0]-1;
int roi_start_w = bottom_rois[1]-1;
int roi_start_h = bottom_rois[2]-1;
int bottom_index = roi_batch_ind * channels * height * width + c * height * width + (roi_start_h + ph) * width + (roi_start_w + pw);
if (argmax_data[bottom_index] != 0) {
bottom_diff[bottom_index] += top_diff[index];
}
}
}
template <typename Dtype>
void ROILayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
const int count = top[0]->count();
const int* argmax_data = mask_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROILayer);
} // namespace caffe
|
3bbdd1bfd9a5f68bb9062b61e37dd53e2575fe7e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bvh_common.h"
#include <radixsort_implement.h>
#include "cuFemMath.cuh"
#include <CudaBase.h>
#include <Spline1D.cuh>
__constant__ float3 CGravity;
__constant__ float3 CWind;
__global__ void computeBVolume_kernel(float4 * dst,
float3 * pos,
uint4 * tetVertices,
uint numTet)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= numTet) return;
calculateBandVolume(&dst[ind<<2], pos, tetVertices[ind]);
}
__global__ void elasticity_kernel(float4 * d,
float * alpha,
float Y,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float bezier = calculateBezierPoint1D(alpha[ind]);
if(bezier < 0.05f) bezier = 0.05f;
float4 d161718;
calculateIsotropicElasticity4(Y * bezier,
d161718);
d[ind] = d161718;
}
__global__ void externalForce_kernel(float3 * dst,
float * mass,
float3 * velocity,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float m = mass[ind];
if(m > 1e28f) {
float3_set_zero(dst[ind]);
return;
}
float3 F = scale_float3_by(CGravity, m);
float3 w = CWind;
float3_scale_inplace(w, m);
float3_add_inplace(F, w);
float3 u = CWind;
float3_minus_inplace(u, velocity[ind]);
float3_scale_inplace(u, m * 0.01f);
float3_add_inplace(F, u);
dst[ind] = F;
}
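// Assemble the implicit-Euler style system in one pass:
// rhs = dt * (f_ext - f0 - K*x) + M*v, while each stored stiffness block is rescaled to
// dt^2*K and a mass-plus-damping term (mi + 0.89*mi*dt) is added to the diagonal entries,
// i.e. A ~ M + dt*C + dt^2*K with C proportional to M.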
__global__ void computeRhs_kernel(float3 * rhs,
float3 * pos,
float3 * vel,
float * mass,
mat33 * stiffness,
uint * rowPtr,
uint * colInd,
float3 * f0,
float3 * externalForce,
float dt,
float dt2,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float3 result;
float3_set_zero(result);
const uint nextRow = rowPtr[ind+1];
uint cur = rowPtr[ind];
const float mi = mass[ind];
mat33 K;
uint j;
float3 tmp;
float damping;
for(;cur<nextRow; cur++) {
K = stiffness[cur];
j = colInd[cur];
mat33_float3_prod(tmp, K, pos[j]);
float3_minus_inplace(result, tmp);
mat33_mult_f(K, dt2);
stiffness[cur] = K;
if(ind == colInd[cur]) {
damping = .89f * mi * dt + mi;
K.v[0].x += damping;
K.v[1].y += damping;
K.v[2].z += damping;
stiffness[cur] = K;
}
}
float3_minus_inplace(result, f0[ind]);
float3_add_inplace(result, externalForce[ind]);
float3_scale_inplace(result, dt);
tmp = vel[ind];
float3_scale_inplace(tmp, mi);
float3_add_inplace(result, tmp);
rhs[ind] = result;
}
__global__ void dampK_kernel(mat33 * stiffness,
float * mass,
uint * rowPtr,
uint * colInd,
float dt,
float dt2,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
const uint nextRow = rowPtr[ind+1];
uint cur = rowPtr[ind];
const float mi = mass[ind];
float damping;
for(;cur<nextRow; cur++) {
mat33_mult_f(stiffness[cur], dt2);
if(ind == colInd[cur]) {
damping = .1f * mi * dt + mi;
stiffness[cur].v[0].x += damping;
stiffness[cur].v[1].y += damping;
stiffness[cur].v[2].z += damping;
}
}
}
__global__ void internalForce_kernel(float3 * dst,
float3 * pos,
uint4 * tetvert,
float4 * BVol,
mat33 * orientation,
KeyValuePair * tetraInd,
uint * bufferIndices,
float4 * elasticity,
uint maxBufferInd,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float3_set_zero(dst[ind]);
float4 d161718;
float4 * B;
float3 pj, force, sum;
mat33 Ke, Re;
uint iTet, i, j;
uint cur = bufferIndices[ind];
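// 9496729 acts as a sentinel value meaning "no tetrahedron processed yet".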
uint lastTet = 9496729;
for(;;) {
if(tetraInd[cur].key != ind) break;
extractTetij(tetraInd[cur].value, iTet, i, j);
if(lastTet != iTet) {
if(lastTet != 9496729) {
mat33_float3_prod(force, Re, sum);
float3_minus_inplace(dst[ind], force);
}
float3_set_zero(sum);
lastTet = iTet;
}
Re = orientation[iTet];
B = &BVol[iTet<<2];
d161718 = elasticity[iTet];
calculateKe(Ke, B, d161718.x, d161718.y, d161718.z, i, j);
uint * tetv = &(tetvert[iTet].x);
pj = pos[tetv[j]];
mat33_float3_prod(force, Ke, pj);
float3_add_inplace(sum, force);
cur++;
if(cur >= maxBufferInd) break;
}
mat33_float3_prod(force, Re, sum);
float3_minus_inplace(dst[ind], force);
}
__global__ void resetForce_kernel(float3 * dst,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float3_set_zero(dst[ind]);
}
__global__ void resetStiffnessMatrix_kernel(mat33* dst,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
set_mat33_zero(dst[ind]);
}
__global__ void stiffnessAssembly_kernel(mat33 * dst,
float4 * BVol,
mat33 * orientation,
KeyValuePair * tetraInd,
uint * bufferIndices,
float4 * elasticity,
uint maxBufferInd,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
set_mat33_zero(dst[ind]);
float4 d161718;
float4 * B;
mat33 Ke, Re, ReT, tmp, tmpT;
uint iTet, i, j, needT;
uint cur = bufferIndices[ind];
for(;;) {
if(tetraInd[cur].key != ind) break;
extractTetijt(tetraInd[cur].value, iTet, i, j, needT);
B = &BVol[iTet<<2];
d161718 = elasticity[iTet];
calculateKe(Ke, B, d161718.x, d161718.y, d161718.z, i, j);
Re = orientation[iTet];
mat33_transpose(ReT, Re);
mat33_cpy(tmp, Re);
mat33_mult(tmp, Ke);
mat33_mult(tmp, ReT);
mat33_transpose(tmpT, tmp);
if(needT)
mat33_add(dst[ind], tmpT);
else
mat33_add(dst[ind], tmp);
cur++;
if(cur >= maxBufferInd) break;
}
}
__global__ void resetRe_kernel(mat33* dst,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
set_mat33_identity(dst[ind]);
}
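// Estimate the per-tetrahedron rotation Re (stiffness-warping / corotational style):
// combine the rest-pose edges with the current face cross products scaled by div6V into a
// deformation-gradient-like matrix, then orthonormalize it to keep only the rotation.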
__global__ void calculateRe_kernel(mat33 * dst,
float3 * pos,
float3 * pos0,
float4 * BVol,
uint4 * indices,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
uint4 t = indices[ind];
float4 * B = &BVol[ind<<2];
float3 pnt[4];
tetrahedronP(pnt, pos0, t);
float3 e01, e02, e03;
tetrahedronEdge(e01, e02, e03, pnt);
float div6V = 1.f / B[0].w * 6.f;
tetrahedronP(pnt, pos, t);
float3 e1, e2, e3;
tetrahedronEdge(e1, e2, e3, pnt);
float3 n1 = scale_float3_by(float3_cross(e2, e3), div6V);
float3 n2 = scale_float3_by(float3_cross(e3, e1), div6V);
float3 n3 = scale_float3_by(float3_cross(e1, e2), div6V);
mat33 * Ke = &dst[ind];
Ke->v[0].x = e01.x * n1.x + e02.x * n2.x + e03.x * n3.x;
Ke->v[1].x = e01.x * n1.y + e02.x * n2.y + e03.x * n3.y;
Ke->v[2].x = e01.x * n1.z + e02.x * n2.z + e03.x * n3.z;
Ke->v[0].y = e01.y * n1.x + e02.y * n2.x + e03.y * n3.x;
Ke->v[1].y = e01.y * n1.y + e02.y * n2.y + e03.y * n3.y;
Ke->v[2].y = e01.y * n1.z + e02.y * n2.z + e03.y * n3.z;
Ke->v[0].z = e01.z * n1.x + e02.z * n2.x + e03.z * n3.x;
Ke->v[1].z = e01.z * n1.y + e02.z * n2.y + e03.z * n3.y;
Ke->v[2].z = e01.z * n1.z + e02.z * n2.z + e03.z * n3.z;
mat33_orthoNormalize(*Ke);
}
extern "C" {
void cuFemTetrahedron_resetRe(mat33 * d, uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( resetRe_kernel), dim3(grid), dim3(block) , 0, 0, d, maxInd);
}
void cuFemTetrahedron_resetStiffnessMatrix(mat33 * dst,
uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( resetStiffnessMatrix_kernel), dim3(grid), dim3(block) , 0, 0, dst,
maxInd);
}
void cuFemTetrahedron_resetForce(float3 * dst,
uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( resetForce_kernel), dim3(grid), dim3(block) , 0, 0, dst, maxInd);
}
void cuFemTetrahedron_computeRhs(float3 * rhs,
float3 * pos,
float3 * vel,
float * mass,
mat33 * stiffness,
uint * rowPtr,
uint * colInd,
float3 * f0,
float3 * externalForce,
float dt,
uint maxInd)
{
int tpb = 256;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( computeRhs_kernel), dim3(grid), dim3(block) , 0, 0, rhs,
pos,
vel,
mass,
stiffness,
rowPtr,
colInd,
f0,
externalForce,
dt,
dt * dt,
maxInd);
}
void cuFemTetrahedron_dampK(mat33 * stiffness,
float * mass,
uint * rowPtr,
uint * colInd,
float dt,
uint maxInd)
{
int tpb = CudaBase::LimitNThreadPerBlock(16, 50);
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( dampK_kernel), dim3(grid), dim3(block) , 0, 0, stiffness,
mass,
rowPtr,
colInd,
dt,
dt * dt,
maxInd);
}
}
namespace tetrahedronfem {
void setGravity(float * g)
{ hipMemcpyToSymbol(CGravity, g, 12); }
void setWind(float * w)
{ hipMemcpyToSymbol(CWind, w, 12); }
void computeExternalForce(float3 * dst,
float * mass,
float3 * velocity,
float * wind,
uint maxInd)
{
setWind(wind);
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( externalForce_kernel), dim3(grid), dim3(block) , 0, 0, dst,
mass,
velocity,
maxInd);
}
void computeBVolume(float4 * dst,
float3 * pos,
uint4 * tetVertices,
uint numTet)
{
dim3 block(256, 1, 1);
unsigned nblk = iDivUp(numTet, 256);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( computeBVolume_kernel), dim3(grid), dim3(block) , 0, 0, dst,
pos,
tetVertices,
numTet);
}
void calculateRe(mat33 * dst,
float3 * pos,
float3 * pos0,
uint4 * indices,
float4 * BVol,
uint maxInd)
{
int tpb = 256;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( calculateRe_kernel), dim3(grid), dim3(block) , 0, 0, dst,
pos,
pos0,
BVol,
indices,
maxInd);
}
void internalForce(float3 * dst,
float3 * pos,
uint4 * tetvert,
float4 * BVol,
mat33 * orientation,
KeyValuePair * tetraInd,
uint * bufferIndices,
float4 * elasticity,
uint maxBufferInd,
uint maxInd)
{
int tpb = 256;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( internalForce_kernel), dim3(grid), dim3(block) , 0, 0, dst,
pos,
tetvert,
BVol,
orientation,
tetraInd,
bufferIndices,
elasticity,
maxBufferInd,
maxInd);
}
void stiffnessAssembly(mat33 * dst,
float3 * pos,
uint4 * vert,
float4 * BVol,
mat33 * orientation,
KeyValuePair * tetraInd,
uint * bufferIndices,
float4 * elasticity,
uint maxBufferInd,
uint maxInd)
{
int tpb = 256;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( stiffnessAssembly_kernel), dim3(grid), dim3(block) , 0, 0, dst,
BVol,
orientation,
tetraInd,
bufferIndices,
elasticity,
maxBufferInd,
maxInd);
}
void computeElasticity(float4 * d,
float * alpha,
float Y,
uint maxInd,
float * splineV)
{
hipMemcpyToSymbol(CSplineCvs, splineV, 32);
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( elasticity_kernel), dim3(grid), dim3(block) , 0, 0, d,
alpha,
Y,
maxInd);
}
}
|
3bbdd1bfd9a5f68bb9062b61e37dd53e2575fe7e.cu
|
#include "bvh_common.h"
#include <radixsort_implement.h>
#include "cuFemMath.cuh"
#include <CudaBase.h>
#include <Spline1D.cuh>
__constant__ float3 CGravity;
__constant__ float3 CWind;
__global__ void computeBVolume_kernel(float4 * dst,
float3 * pos,
uint4 * tetVertices,
uint numTet)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= numTet) return;
calculateBandVolume(&dst[ind<<2], pos, tetVertices[ind]);
}
__global__ void elasticity_kernel(float4 * d,
float * alpha,
float Y,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float bezier = calculateBezierPoint1D(alpha[ind]);
if(bezier < 0.05f) bezier = 0.05f;
float4 d161718;
calculateIsotropicElasticity4(Y * bezier,
d161718);
d[ind] = d161718;
}
__global__ void externalForce_kernel(float3 * dst,
float * mass,
float3 * velocity,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float m = mass[ind];
if(m > 1e28f) {
float3_set_zero(dst[ind]);
return;
}
float3 F = scale_float3_by(CGravity, m);
float3 w = CWind;
float3_scale_inplace(w, m);
float3_add_inplace(F, w);
float3 u = CWind;
float3_minus_inplace(u, velocity[ind]);
float3_scale_inplace(u, m * 0.01f);
float3_add_inplace(F, u);
dst[ind] = F;
}
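// Assemble the implicit-Euler style system in one pass:
// rhs = dt * (f_ext - f0 - K*x) + M*v, while each stored stiffness block is rescaled to
// dt^2*K and a mass-plus-damping term (mi + 0.89*mi*dt) is added to the diagonal entries,
// i.e. A ~ M + dt*C + dt^2*K with C proportional to M.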
__global__ void computeRhs_kernel(float3 * rhs,
float3 * pos,
float3 * vel,
float * mass,
mat33 * stiffness,
uint * rowPtr,
uint * colInd,
float3 * f0,
float3 * externalForce,
float dt,
float dt2,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float3 result;
float3_set_zero(result);
const uint nextRow = rowPtr[ind+1];
uint cur = rowPtr[ind];
const float mi = mass[ind];
mat33 K;
uint j;
float3 tmp;
float damping;
for(;cur<nextRow; cur++) {
K = stiffness[cur];
j = colInd[cur];
mat33_float3_prod(tmp, K, pos[j]);
float3_minus_inplace(result, tmp);
mat33_mult_f(K, dt2);
stiffness[cur] = K;
if(ind == colInd[cur]) {
damping = .89f * mi * dt + mi;
K.v[0].x += damping;
K.v[1].y += damping;
K.v[2].z += damping;
stiffness[cur] = K;
}
}
float3_minus_inplace(result, f0[ind]);
float3_add_inplace(result, externalForce[ind]);
float3_scale_inplace(result, dt);
tmp = vel[ind];
float3_scale_inplace(tmp, mi);
float3_add_inplace(result, tmp);
rhs[ind] = result;
}
__global__ void dampK_kernel(mat33 * stiffness,
float * mass,
uint * rowPtr,
uint * colInd,
float dt,
float dt2,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
const uint nextRow = rowPtr[ind+1];
uint cur = rowPtr[ind];
const float mi = mass[ind];
float damping;
for(;cur<nextRow; cur++) {
mat33_mult_f(stiffness[cur], dt2);
if(ind == colInd[cur]) {
damping = .1f * mi * dt + mi;
stiffness[cur].v[0].x += damping;
stiffness[cur].v[1].y += damping;
stiffness[cur].v[2].z += damping;
}
}
}
__global__ void internalForce_kernel(float3 * dst,
float3 * pos,
uint4 * tetvert,
float4 * BVol,
mat33 * orientation,
KeyValuePair * tetraInd,
uint * bufferIndices,
float4 * elasticity,
uint maxBufferInd,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float3_set_zero(dst[ind]);
float4 d161718;
float4 * B;
float3 pj, force, sum;
mat33 Ke, Re;
uint iTet, i, j;
uint cur = bufferIndices[ind];
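// 9496729 acts as a sentinel value meaning "no tetrahedron processed yet".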
uint lastTet = 9496729;
for(;;) {
if(tetraInd[cur].key != ind) break;
extractTetij(tetraInd[cur].value, iTet, i, j);
if(lastTet != iTet) {
if(lastTet != 9496729) {
mat33_float3_prod(force, Re, sum);
float3_minus_inplace(dst[ind], force);
}
float3_set_zero(sum);
lastTet = iTet;
}
Re = orientation[iTet];
B = &BVol[iTet<<2];
d161718 = elasticity[iTet];
calculateKe(Ke, B, d161718.x, d161718.y, d161718.z, i, j);
uint * tetv = &(tetvert[iTet].x);
pj = pos[tetv[j]];
mat33_float3_prod(force, Ke, pj);
float3_add_inplace(sum, force);
cur++;
if(cur >= maxBufferInd) break;
}
mat33_float3_prod(force, Re, sum);
float3_minus_inplace(dst[ind], force);
}
__global__ void resetForce_kernel(float3 * dst,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
float3_set_zero(dst[ind]);
}
__global__ void resetStiffnessMatrix_kernel(mat33* dst,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
set_mat33_zero(dst[ind]);
}
__global__ void stiffnessAssembly_kernel(mat33 * dst,
float4 * BVol,
mat33 * orientation,
KeyValuePair * tetraInd,
uint * bufferIndices,
float4 * elasticity,
uint maxBufferInd,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
set_mat33_zero(dst[ind]);
float4 d161718;
float4 * B;
mat33 Ke, Re, ReT, tmp, tmpT;
uint iTet, i, j, needT;
uint cur = bufferIndices[ind];
for(;;) {
if(tetraInd[cur].key != ind) break;
extractTetijt(tetraInd[cur].value, iTet, i, j, needT);
B = &BVol[iTet<<2];
d161718 = elasticity[iTet];
calculateKe(Ke, B, d161718.x, d161718.y, d161718.z, i, j);
Re = orientation[iTet];
mat33_transpose(ReT, Re);
mat33_cpy(tmp, Re);
mat33_mult(tmp, Ke);
mat33_mult(tmp, ReT);
mat33_transpose(tmpT, tmp);
if(needT)
mat33_add(dst[ind], tmpT);
else
mat33_add(dst[ind], tmp);
cur++;
if(cur >= maxBufferInd) break;
}
}
__global__ void resetRe_kernel(mat33* dst,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
set_mat33_identity(dst[ind]);
}
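// Estimate the per-tetrahedron rotation Re (stiffness-warping / corotational style):
// combine the rest-pose edges with the current face cross products scaled by div6V into a
// deformation-gradient-like matrix, then orthonormalize it to keep only the rotation.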
__global__ void calculateRe_kernel(mat33 * dst,
float3 * pos,
float3 * pos0,
float4 * BVol,
uint4 * indices,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
uint4 t = indices[ind];
float4 * B = &BVol[ind<<2];
float3 pnt[4];
tetrahedronP(pnt, pos0, t);
float3 e01, e02, e03;
tetrahedronEdge(e01, e02, e03, pnt);
float div6V = 1.f / B[0].w * 6.f;
tetrahedronP(pnt, pos, t);
float3 e1, e2, e3;
tetrahedronEdge(e1, e2, e3, pnt);
float3 n1 = scale_float3_by(float3_cross(e2, e3), div6V);
float3 n2 = scale_float3_by(float3_cross(e3, e1), div6V);
float3 n3 = scale_float3_by(float3_cross(e1, e2), div6V);
mat33 * Ke = &dst[ind];
Ke->v[0].x = e01.x * n1.x + e02.x * n2.x + e03.x * n3.x;
Ke->v[1].x = e01.x * n1.y + e02.x * n2.y + e03.x * n3.y;
Ke->v[2].x = e01.x * n1.z + e02.x * n2.z + e03.x * n3.z;
Ke->v[0].y = e01.y * n1.x + e02.y * n2.x + e03.y * n3.x;
Ke->v[1].y = e01.y * n1.y + e02.y * n2.y + e03.y * n3.y;
Ke->v[2].y = e01.y * n1.z + e02.y * n2.z + e03.y * n3.z;
Ke->v[0].z = e01.z * n1.x + e02.z * n2.x + e03.z * n3.x;
Ke->v[1].z = e01.z * n1.y + e02.z * n2.y + e03.z * n3.y;
Ke->v[2].z = e01.z * n1.z + e02.z * n2.z + e03.z * n3.z;
mat33_orthoNormalize(*Ke);
}
extern "C" {
void cuFemTetrahedron_resetRe(mat33 * d, uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
resetRe_kernel<<< grid, block >>>(d, maxInd);
}
void cuFemTetrahedron_resetStiffnessMatrix(mat33 * dst,
uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
resetStiffnessMatrix_kernel<<< grid, block >>>(dst,
maxInd);
}
void cuFemTetrahedron_resetForce(float3 * dst,
uint maxInd)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
resetForce_kernel<<< grid, block >>>(dst, maxInd);
}
void cuFemTetrahedron_computeRhs(float3 * rhs,
float3 * pos,
float3 * vel,
float * mass,
mat33 * stiffness,
uint * rowPtr,
uint * colInd,
float3 * f0,
float3 * externalForce,
float dt,
uint maxInd)
{
int tpb = 256;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
computeRhs_kernel<<< grid, block >>>(rhs,
pos,
vel,
mass,
stiffness,
rowPtr,
colInd,
f0,
externalForce,
dt,
dt * dt,
maxInd);
}
void cuFemTetrahedron_dampK(mat33 * stiffness,
float * mass,
uint * rowPtr,
uint * colInd,
float dt,
uint maxInd)
{
int tpb = CudaBase::LimitNThreadPerBlock(16, 50);
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
dampK_kernel<<< grid, block >>>(stiffness,
mass,
rowPtr,
colInd,
dt,
dt * dt,
maxInd);
}
}
namespace tetrahedronfem {
void setGravity(float * g)
{ cudaMemcpyToSymbol(CGravity, g, 12); }
void setWind(float * w)
{ cudaMemcpyToSymbol(CWind, w, 12); }
void computeExternalForce(float3 * dst,
float * mass,
float3 * velocity,
float * wind,
uint maxInd)
{
setWind(wind);
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
externalForce_kernel<<< grid, block >>>(dst,
mass,
velocity,
maxInd);
}
void computeBVolume(float4 * dst,
float3 * pos,
uint4 * tetVertices,
uint numTet)
{
dim3 block(256, 1, 1);
unsigned nblk = iDivUp(numTet, 256);
dim3 grid(nblk, 1, 1);
computeBVolume_kernel<<< grid, block >>>(dst,
pos,
tetVertices,
numTet);
}
void calculateRe(mat33 * dst,
float3 * pos,
float3 * pos0,
uint4 * indices,
float4 * BVol,
uint maxInd)
{
int tpb = 256;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
calculateRe_kernel<<< grid, block >>>(dst,
pos,
pos0,
BVol,
indices,
maxInd);
}
void internalForce(float3 * dst,
float3 * pos,
uint4 * tetvert,
float4 * BVol,
mat33 * orientation,
KeyValuePair * tetraInd,
uint * bufferIndices,
float4 * elasticity,
uint maxBufferInd,
uint maxInd)
{
int tpb = 256;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
internalForce_kernel<<< grid, block >>>(dst,
pos,
tetvert,
BVol,
orientation,
tetraInd,
bufferIndices,
elasticity,
maxBufferInd,
maxInd);
}
void stiffnessAssembly(mat33 * dst,
float3 * pos,
uint4 * vert,
float4 * BVol,
mat33 * orientation,
KeyValuePair * tetraInd,
uint * bufferIndices,
float4 * elasticity,
uint maxBufferInd,
uint maxInd)
{
int tpb = 256;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(maxInd, tpb);
dim3 grid(nblk, 1, 1);
stiffnessAssembly_kernel<<< grid, block >>>(dst,
BVol,
orientation,
tetraInd,
bufferIndices,
elasticity,
maxBufferInd,
maxInd);
}
void computeElasticity(float4 * d,
float * alpha,
float Y,
uint maxInd,
float * splineV)
{
cudaMemcpyToSymbol(CSplineCvs, splineV, 32);
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(maxInd, 512);
dim3 grid(nblk, 1, 1);
elasticity_kernel<<< grid, block >>>(d,
alpha,
Y,
maxInd);
}
}
|
6dde7711524f226b3c497419cf3dc4e6990643b6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <[email protected]>,
// Apoorv Vyas <[email protected]>
//
#include <hip/hip_cooperative_groups.h>
#include <torch/extension.h>
using namespace cooperative_groups;
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d;
typedef torch::PackedTensorAccessor32<int64_t, 4, torch::RestrictPtrTraits> int64_accessor_4d;
inline __device__ float dot(const float *a, const float *b, int n) {
float s = 0;
for (int i=0; i<n; i++) {
s += (*a) * (*b);
a++;
b++;
}
return s;
}
inline __device__ void add_scaled(float *a, const float *b, float s, int n) {
for (int i=0; i<n; i++) {
atomicAdd(a, s * (*b));
a++;
b++;
}
}
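// Each block stages q_load query vectors in shared memory, interleaved with stride q_load
// (element e of query q sits at shared_qs[q + e*q_load]); every thread then handles one
// (query, top-k key) pair and writes its dot product into `products`.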
__global__ void sparse_dot_product_kernel(
const float_accessor_4d queries,
const float_accessor_4d keys,
const int64_accessor_4d topk,
float_accessor_4d products,
int q_load
) {
const int N = queries.size(0);
const int H = queries.size(1);
const int L = queries.size(2);
const int E = queries.size(3);
const int S = keys.size(2);
const int hl = H*L;
extern __shared__ float shared_qs[];
int full_indx = q_load*blockIdx.x + threadIdx.x;
int n = full_indx / (hl);
int h = (full_indx - n*hl) / L;
int l = (full_indx - n*hl) % L;
if ((threadIdx.x < q_load) && ((q_load*blockIdx.x + threadIdx.x) < (N*L*H))) {
int q_indx = threadIdx.x;
float *s_ptr = shared_qs + q_indx;
for (int e=0; e<E; e++) {
*s_ptr = queries[n][h][l][e];
s_ptr += q_load;
}
}
__syncthreads();
int q_indx = threadIdx.x % q_load;
int topk_idx = threadIdx.x / q_load;
int q_processed = (blockIdx.x*q_load) + q_indx;
int seq_idx = q_processed / (hl);
int h_idx = (q_processed - seq_idx*hl)/L;
int l_idx = (q_processed - seq_idx*hl)%L;
if ((seq_idx >= N) || (l_idx >= L) || (h_idx >= H)) {
return;
}
float s = 0;
const float *q_cur = shared_qs + q_indx;
int k_idx = topk[seq_idx][h_idx][l_idx][topk_idx];
//#pragma unroll 8
for (int e=0; e<E; e++) {
s += (*q_cur) * keys[seq_idx][h_idx][k_idx][e];
q_cur += q_load;
}
products[seq_idx][h_idx][l_idx][topk_idx] = s;
}
void sparse_dot_product(
const torch::Tensor Q,
const torch::Tensor K,
const torch::Tensor topk,
torch::Tensor product
) {
int N = Q.size(0);
int H = Q.size(1);
int L = Q.size(2);
int E = Q.size(3);
int k = topk.size(3);
int S = K.size(2);
int max_threads = 1024;
int q_max = (48 * 1024)/(4*E) < L ? (48 * 1024)/(4*E):L;
int q_load = (max_threads/k) < q_max ? (max_threads/k):q_max;
int threads = q_load * k;
const int shared_mem_queries = q_load * E * sizeof(float);
int total_products = L*N*H*k;
int blocks = ceil(float(total_products)/(q_load * k));
hipLaunchKernelGGL(( sparse_dot_product_kernel), dim3(blocks),
dim3(threads),
shared_mem_queries, 0,
Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
K.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
topk.packed_accessor32<int64_t, 4, torch::RestrictPtrTraits>(),
product.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
q_load
);
}
__global__ void sparse_dot_backward_kernel(
const float_accessor_4d queries,
const float_accessor_4d keys,
const int64_accessor_4d topk,
const float_accessor_4d grad_out,
float_accessor_4d grad_q,
float_accessor_4d grad_k
) {
const int N = queries.size(0);
const int H = queries.size(1);
const int L = queries.size(2);
const int E = queries.size(3);
const int S = keys.size(2);
const int k = topk.size(3);
int full_index = blockIdx.x * blockDim.x + threadIdx.x;
int n = full_index / (H*L*k);
int h = (full_index - n*H*L*k) / (L*k);
int l = (full_index - n*H*L*k - h*L*k) / k;
int j = full_index % k;
if (n >= N) {
return;
}
const int key_index = topk[n][h][l][j];
const float grad = grad_out[n][h][l][j];
for (int e=0; e<E; e++) {
atomicAdd(&grad_q[n][h][l][e], grad * keys[n][h][key_index][e]);
}
for (int e=0; e<E; e++) {
atomicAdd(&grad_k[n][h][key_index][e], grad * queries[n][h][l][e]);
}
}
void sparse_dot_backward(
const torch::Tensor Q,
const torch::Tensor K,
const torch::Tensor topk,
const torch::Tensor grad_out,
torch::Tensor grad_Q,
torch::Tensor grad_K
) {
int N = Q.size(0);
int H = Q.size(1);
int L = Q.size(2);
int E = Q.size(3);
int k = topk.size(3);
int S = K.size(2);
int threads = 1024;
int blocks = (N*H*L*k + threads - 1) / threads;
hipLaunchKernelGGL(( sparse_dot_backward_kernel), dim3(blocks), dim3(threads), 0, 0,
Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
K.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
topk.packed_accessor32<int64_t, 4, torch::RestrictPtrTraits>(),
grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_K.packed_accessor32<float, 4, torch::RestrictPtrTraits>()
);
}
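// One block per query position. Shared memory layout: the first k*E floats hold the
// weighted value contributions (entry j + d*k for key j and dimension d), the next k
// floats hold the attention weights, and the following k floats hold the top-k key
// indices; the final loop reduces over the k keys for each output dimension.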
__global__ void sparse_weighted_average_kernel(
const float_accessor_4d weights,
const float_accessor_4d values,
const int64_accessor_4d topk,
float_accessor_4d output,
int N,
int H,
int L,
int E,
int k,
int n_dim_per_thread
) {
extern __shared__ float shared_mem[];
int block_idx = blockIdx.x;
if ((block_idx > N*H*L)){
return;
}
int n = (block_idx) / (H*L);
int h = (block_idx - n*H*L) / (L);
int l = block_idx % L;
if ((threadIdx.x < k)) {
shared_mem[k*E + threadIdx.x] = weights[n][h][l][threadIdx.x];
shared_mem[(k*(E+1)) + threadIdx.x] = topk[n][h][l][threadIdx.x];
}
__syncthreads();
if (threadIdx.x < k) {
int n_threads_per_key = E / n_dim_per_thread;
int j = threadIdx.x / n_threads_per_key ;
int d_start = (threadIdx.x - j*n_threads_per_key) * n_dim_per_thread;
int key_idx = int(shared_mem[(k*(E+1)) + j]);
const float s = shared_mem[k*E + j];
for(int i=0; i<n_dim_per_thread; i++) {
int cur_d = d_start + i;
float v = values[n][h][key_idx][cur_d];
shared_mem[j + (cur_d * k)] = v * s;
}
}
__syncthreads();
if ((threadIdx.x < E)) {
float sum = 0;
int start = threadIdx.x*k;
for (int i=start; i<start+k; i++) {
sum = sum + shared_mem[i];
}
output[n][h][l][threadIdx.x] = sum;
}
}
void sparse_weighted_average(
const torch::Tensor weights,
const torch::Tensor values,
const torch::Tensor topk,
torch::Tensor output
) {
int N = weights.size(0);
int H = weights.size(1);
int L = weights.size(2);
int k = weights.size(3);
int E = values.size(3);
auto weights_a = weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto values_a = values.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto topk_a = topk.packed_accessor32<int64_t, 4, torch::RestrictPtrTraits>();
auto output_a = output.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
//float* output_p = output.data_ptr<float>();
int max_threads = 1024;
int n_dim_per_thread = E;
// We need at least E threads for the final reduction
int threads = ceil((E * k)/n_dim_per_thread) > E ? ceil((E * k)/n_dim_per_thread):E;
int total_products = L*N*H*k;
int blocks = ceil(float(total_products)/(k));
const int shared_mem = (((k * E) + 2*k)* sizeof(float));
hipLaunchKernelGGL(( sparse_weighted_average_kernel), dim3(blocks),
dim3(threads),
shared_mem, 0,
weights_a,
values_a,
topk_a,
output_a,
N,
H,
L,
E,
k,
n_dim_per_thread
);
}
__global__ void sparse_weighted_average_backward_kernel(
const float_accessor_4d weights,
const float_accessor_4d values,
const int64_accessor_4d topk,
const float_accessor_4d grad_out,
float_accessor_4d grad_weights,
float_accessor_4d grad_values,
int N,
int H,
int L,
int E,
int k,
int dim_per_thread
) {
int full_index = blockIdx.x * blockDim.x + threadIdx.x;
int n = full_index / (H*L*k);
int h = (full_index - n*H*L*k) / (L*k);
int l = (full_index - n*H*L*k - h*L*k) / k;
int j = full_index % k;
if (n >= N) {
return;
}
int key_idx = topk[n][h][l][j];
int start_dim = threadIdx.y * dim_per_thread;
int end_dim = start_dim + dim_per_thread;
if (threadIdx.y == 0) {
grad_weights[n][h][l][j] = dot(
&values[n][h][key_idx][0],
&grad_out[n][h][l][0],
E
);
}
float weight = weights[n][h][l][j];
for (int e=start_dim; e<end_dim; e++) {
atomicAdd(
&grad_values[n][h][key_idx][e],
weight * grad_out[n][h][l][e]
);
}
}
void sparse_weighted_average_backward(
const torch::Tensor weights,
const torch::Tensor values,
const torch::Tensor topk,
const torch::Tensor grad_out,
torch::Tensor grad_weights,
torch::Tensor grad_values
) {
int N = weights.size(0);
int H = weights.size(1);
int L = weights.size(2);
int k = weights.size(3);
int E = values.size(3);
auto weights_a = weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto values_a = values.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto topk_a = topk.packed_accessor32<int64_t, 4, torch::RestrictPtrTraits>();
auto grad_out_a = grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto grad_weights_a = grad_weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto grad_values_a = grad_values.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
int threads_x = 256;
int threads_y = 4;
int dim_per_thread = E / threads_y;
dim3 threads(threads_x, threads_y);
int blocks = (N*H*L*k + threads_x - 1)/threads_x;
hipLaunchKernelGGL(( sparse_weighted_average_backward_kernel), dim3(blocks), dim3(threads), 0, 0,
weights_a,
values_a,
topk_a,
grad_out_a,
grad_weights_a,
grad_values_a,
N,
H,
L,
E,
k,
dim_per_thread
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"sparse_dot_product",
&sparse_dot_product,
"Compute the dot product only in the positions specified by topk."
);
m.def(
"sparse_dot_backward",
&sparse_dot_backward,
"Compute the gradients for the sparse dot product."
);
m.def(
"sparse_weighted_average",
&sparse_weighted_average,
"Average the values using the sparse attention."
);
m.def(
"sparse_weighted_average_backward",
&sparse_weighted_average_backward,
"Compute the gradients for the sparse weighted average."
);
}
|
6dde7711524f226b3c497419cf3dc4e6990643b6.cu
|
//
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <[email protected]>,
// Apoorv Vyas <[email protected]>
//
#include <cooperative_groups.h>
#include <torch/extension.h>
using namespace cooperative_groups;
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d;
typedef torch::PackedTensorAccessor32<int64_t, 4, torch::RestrictPtrTraits> int64_accessor_4d;
inline __device__ float dot(const float *a, const float *b, int n) {
float s = 0;
for (int i=0; i<n; i++) {
s += (*a) * (*b);
a++;
b++;
}
return s;
}
inline __device__ void add_scaled(float *a, const float *b, float s, int n) {
for (int i=0; i<n; i++) {
atomicAdd(a, s * (*b));
a++;
b++;
}
}
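// Each block stages q_load query vectors in shared memory, interleaved with stride q_load
// (element e of query q sits at shared_qs[q + e*q_load]); every thread then handles one
// (query, top-k key) pair and writes its dot product into `products`.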
__global__ void sparse_dot_product_kernel(
const float_accessor_4d queries,
const float_accessor_4d keys,
const int64_accessor_4d topk,
float_accessor_4d products,
int q_load
) {
const int N = queries.size(0);
const int H = queries.size(1);
const int L = queries.size(2);
const int E = queries.size(3);
const int S = keys.size(2);
const int hl = H*L;
extern __shared__ float shared_qs[];
int full_indx = q_load*blockIdx.x + threadIdx.x;
int n = full_indx / (hl);
int h = (full_indx - n*hl) / L;
int l = (full_indx - n*hl) % L;
if ((threadIdx.x < q_load) && ((q_load*blockIdx.x + threadIdx.x) < (N*L*H))) {
int q_indx = threadIdx.x;
float *s_ptr = shared_qs + q_indx;
for (int e=0; e<E; e++) {
*s_ptr = queries[n][h][l][e];
s_ptr += q_load;
}
}
__syncthreads();
int q_indx = threadIdx.x % q_load;
int topk_idx = threadIdx.x / q_load;
int q_processed = (blockIdx.x*q_load) + q_indx;
int seq_idx = q_processed / (hl);
int h_idx = (q_processed - seq_idx*hl)/L;
int l_idx = (q_processed - seq_idx*hl)%L;
if ((seq_idx >= N) || (l_idx >= L) || (h_idx >= H)) {
return;
}
float s = 0;
const float *q_cur = shared_qs + q_indx;
int k_idx = topk[seq_idx][h_idx][l_idx][topk_idx];
//#pragma unroll 8
for (int e=0; e<E; e++) {
s += (*q_cur) * keys[seq_idx][h_idx][k_idx][e];
q_cur += q_load;
}
products[seq_idx][h_idx][l_idx][topk_idx] = s;
}
void sparse_dot_product(
const torch::Tensor Q,
const torch::Tensor K,
const torch::Tensor topk,
torch::Tensor product
) {
int N = Q.size(0);
int H = Q.size(1);
int L = Q.size(2);
int E = Q.size(3);
int k = topk.size(3);
int S = K.size(2);
int max_threads = 1024;
int q_max = (48 * 1024)/(4*E) < L ? (48 * 1024)/(4*E):L;
int q_load = (max_threads/k) < q_max ? (max_threads/k):q_max;
int threads = q_load * k;
const int shared_mem_queries = q_load * E * sizeof(float);
int total_products = L*N*H*k;
int blocks = ceil(float(total_products)/(q_load * k));
sparse_dot_product_kernel<<<blocks,
threads,
shared_mem_queries>>>(
Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
K.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
topk.packed_accessor32<int64_t, 4, torch::RestrictPtrTraits>(),
product.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
q_load
);
}
__global__ void sparse_dot_backward_kernel(
const float_accessor_4d queries,
const float_accessor_4d keys,
const int64_accessor_4d topk,
const float_accessor_4d grad_out,
float_accessor_4d grad_q,
float_accessor_4d grad_k
) {
const int N = queries.size(0);
const int H = queries.size(1);
const int L = queries.size(2);
const int E = queries.size(3);
const int S = keys.size(2);
const int k = topk.size(3);
int full_index = blockIdx.x * blockDim.x + threadIdx.x;
int n = full_index / (H*L*k);
int h = (full_index - n*H*L*k) / (L*k);
int l = (full_index - n*H*L*k - h*L*k) / k;
int j = full_index % k;
if (n >= N) {
return;
}
const int key_index = topk[n][h][l][j];
const float grad = grad_out[n][h][l][j];
for (int e=0; e<E; e++) {
atomicAdd(&grad_q[n][h][l][e], grad * keys[n][h][key_index][e]);
}
for (int e=0; e<E; e++) {
atomicAdd(&grad_k[n][h][key_index][e], grad * queries[n][h][l][e]);
}
}
void sparse_dot_backward(
const torch::Tensor Q,
const torch::Tensor K,
const torch::Tensor topk,
const torch::Tensor grad_out,
torch::Tensor grad_Q,
torch::Tensor grad_K
) {
int N = Q.size(0);
int H = Q.size(1);
int L = Q.size(2);
int E = Q.size(3);
int k = topk.size(3);
int S = K.size(2);
int threads = 1024;
int blocks = (N*H*L*k + threads - 1) / threads;
sparse_dot_backward_kernel<<<blocks, threads>>>(
Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
K.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
topk.packed_accessor32<int64_t, 4, torch::RestrictPtrTraits>(),
grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_K.packed_accessor32<float, 4, torch::RestrictPtrTraits>()
);
}
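// One block per query position. Shared memory layout: the first k*E floats hold the
// weighted value contributions (entry j + d*k for key j and dimension d), the next k
// floats hold the attention weights, and the following k floats hold the top-k key
// indices; the final loop reduces over the k keys for each output dimension.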
__global__ void sparse_weighted_average_kernel(
const float_accessor_4d weights,
const float_accessor_4d values,
const int64_accessor_4d topk,
float_accessor_4d output,
int N,
int H,
int L,
int E,
int k,
int n_dim_per_thread
) {
extern __shared__ float shared_mem[];
int block_idx = blockIdx.x;
if ((block_idx > N*H*L)){
return;
}
int n = (block_idx) / (H*L);
int h = (block_idx - n*H*L) / (L);
int l = block_idx % L;
if ((threadIdx.x < k)) {
shared_mem[k*E + threadIdx.x] = weights[n][h][l][threadIdx.x];
shared_mem[(k*(E+1)) + threadIdx.x] = topk[n][h][l][threadIdx.x];
}
__syncthreads();
if (threadIdx.x < k) {
int n_threads_per_key = E / n_dim_per_thread;
int j = threadIdx.x / n_threads_per_key ;
int d_start = (threadIdx.x - j*n_threads_per_key) * n_dim_per_thread;
int key_idx = int(shared_mem[(k*(E+1)) + j]);
const float s = shared_mem[k*E + j];
for(int i=0; i<n_dim_per_thread; i++) {
int cur_d = d_start + i;
float v = values[n][h][key_idx][cur_d];
shared_mem[j + (cur_d * k)] = v * s;
}
}
__syncthreads();
if ((threadIdx.x < E)) {
float sum = 0;
int start = threadIdx.x*k;
for (int i=start; i<start+k; i++) {
sum = sum + shared_mem[i];
}
output[n][h][l][threadIdx.x] = sum;
}
}
void sparse_weighted_average(
const torch::Tensor weights,
const torch::Tensor values,
const torch::Tensor topk,
torch::Tensor output
) {
int N = weights.size(0);
int H = weights.size(1);
int L = weights.size(2);
int k = weights.size(3);
int E = values.size(3);
auto weights_a = weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto values_a = values.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto topk_a = topk.packed_accessor32<int64_t, 4, torch::RestrictPtrTraits>();
auto output_a = output.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
//float* output_p = output.data_ptr<float>();
int max_threads = 1024;
int n_dim_per_thread = E;
// We need at least E threads for the final reduction
int threads = ceil((E * k)/n_dim_per_thread) > E ? ceil((E * k)/n_dim_per_thread):E;
int total_products = L*N*H*k;
int blocks = ceil(float(total_products)/(k));
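// k*E floats for the weighted value products plus 2*k floats for the weights and the (float-cast) top-k indices.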
const int shared_mem = (((k * E) + 2*k)* sizeof(float));
sparse_weighted_average_kernel<<<blocks,
threads,
shared_mem>>>(
weights_a,
values_a,
topk_a,
output_a,
N,
H,
L,
E,
k,
n_dim_per_thread
);
}
__global__ void sparse_weighted_average_backward_kernel(
const float_accessor_4d weights,
const float_accessor_4d values,
const int64_accessor_4d topk,
const float_accessor_4d grad_out,
float_accessor_4d grad_weights,
float_accessor_4d grad_values,
int N,
int H,
int L,
int E,
int k,
int dim_per_thread
) {
int full_index = blockIdx.x * blockDim.x + threadIdx.x;
int n = full_index / (H*L*k);
int h = (full_index - n*H*L*k) / (L*k);
int l = (full_index - n*H*L*k - h*L*k) / k;
int j = full_index % k;
if (n >= N) {
return;
}
int key_idx = topk[n][h][l][j];
int start_dim = threadIdx.y * dim_per_thread;
int end_dim = start_dim + dim_per_thread;
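// Each threadIdx.y handles a contiguous slice of the E feature dimensions;
// threadIdx.y == 0 additionally computes the gradient w.r.t. the attention weight.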
if (threadIdx.y == 0) {
grad_weights[n][h][l][j] = dot(
&values[n][h][key_idx][0],
&grad_out[n][h][l][0],
E
);
}
float weight = weights[n][h][l][j];
for (int e=start_dim; e<end_dim; e++) {
atomicAdd(
&grad_values[n][h][key_idx][e],
weight * grad_out[n][h][l][e]
);
}
}
void sparse_weighted_average_backward(
const torch::Tensor weights,
const torch::Tensor values,
const torch::Tensor topk,
const torch::Tensor grad_out,
torch::Tensor grad_weights,
torch::Tensor grad_values
) {
int N = weights.size(0);
int H = weights.size(1);
int L = weights.size(2);
int k = weights.size(3);
int E = values.size(3);
auto weights_a = weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto values_a = values.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto topk_a = topk.packed_accessor32<int64_t, 4, torch::RestrictPtrTraits>();
auto grad_out_a = grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto grad_weights_a = grad_weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
auto grad_values_a = grad_values.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
int threads_x = 256;
int threads_y = 4;
int dim_per_thread = E / threads_y;
dim3 threads(threads_x, threads_y);
int blocks = (N*H*L*k + threads_x - 1)/threads_x;
sparse_weighted_average_backward_kernel<<<blocks, threads>>>(
weights_a,
values_a,
topk_a,
grad_out_a,
grad_weights_a,
grad_values_a,
N,
H,
L,
E,
k,
dim_per_thread
);
}
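// Rough usage sketch (comments only, not part of the extension): one plausible way the
// exported functions compose into a sparse attention step. The tensor names Q, K, V and
// topk are assumptions, and the argument order is inferred from the kernel launches above.
//
//   auto product = torch::zeros({N, H, L, k}, Q.options());
//   sparse_dot_product(Q, K, topk, product);              // QK^T only at the top-k positions
//   auto weights = torch::softmax(product, /*dim=*/-1);   // normalize over the k candidates
//   auto out = torch::zeros({N, H, L, E}, Q.options());
//   sparse_weighted_average(weights, V, topk, out);       // sparse weighted sum of the values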
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"sparse_dot_product",
&sparse_dot_product,
"Compute the dot product only in the positions specified by topk."
);
m.def(
"sparse_dot_backward",
&sparse_dot_backward,
"Compute the gradients for the sparse dot product."
);
m.def(
"sparse_weighted_average",
&sparse_weighted_average,
"Average the values using the sparse attention."
);
m.def(
"sparse_weighted_average_backward",
&sparse_weighted_average_backward,
"Compute the gradients for the sparse weighted average."
);
}
|
5af5803d77d365614f49d5a02e3054e1da5a9944.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_12.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6679348371438,0.00126289690273847,0.782139392386233,0.781865608713176,0.000172404928940328,0.486127277700911,0.00291951812635996,0.999998380572501,1.90168928165571e-08,1.86610449282458e-05,0.999771036737330,1.00692726104090,0.999992367801412,4.91009808167198e-05,0.428312222270385,9.82202217307503,139.832938601023};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
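// Note: RHS_gpu returns the already-updated state values in rDY (rDY[0] is the new
// voltage), so the explicit dt*rDY[0] increment above is overwritten by the i = 0
// iteration of the loop below.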
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
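// sv is a pitched 2D array: row i (offset pitch*i bytes) holds state variable i for
// every cell, and threadID_ selects this cell's column within each row.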
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.9696794605935,0.000146813666101197,0.000135269147896234,0.000356799487796099,0.263567942744581,0.188167539339376,0.134068214689468,3.47241511354903,0.0169001614928138,2.06053018692506,1096.49876510497,0.000404969947809233,0.0900116374999560,0.0166264296010243,0.00231521824295121,5.33812055242267e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
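// Rush-Larsen-style update: each gate relaxes exponentially toward its steady-state
// value with its voltage-dependent time constant, so rDY holds new gate values rather
// than time derivatives.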
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
5af5803d77d365614f49d5a02e3054e1da5a9944.cu
|
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_12.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6679348371438,0.00126289690273847,0.782139392386233,0.781865608713176,0.000172404928940328,0.486127277700911,0.00291951812635996,0.999998380572501,1.90168928165571e-08,1.86610449282458e-05,0.999771036737330,1.00692726104090,0.999992367801412,4.91009808167198e-05,0.428312222270385,9.82202217307503,139.832938601023};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
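// Note: RHS_gpu returns the already-updated state values in rDY (rDY[0] is the new
// voltage), so the explicit dt*rDY[0] increment above is overwritten by the i = 0
// iteration of the loop below.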
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
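// sv is a pitched 2D array: row i (offset pitch*i bytes) holds state variable i for
// every cell, and threadID_ selects this cell's column within each row.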
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.9696794605935,0.000146813666101197,0.000135269147896234,0.000356799487796099,0.263567942744581,0.188167539339376,0.134068214689468,3.47241511354903,0.0169001614928138,2.06053018692506,1096.49876510497,0.000404969947809233,0.0900116374999560,0.0166264296010243,0.00231521824295121,5.33812055242267e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
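// Rush-Larsen-style update: each gate relaxes exponentially toward its steady-state
// value with its voltage-dependent time constant, so rDY holds new gate values rather
// than time derivatives.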
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
350c0205df9bbfddd72be7082ba5c34ab8694f61.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuckoo-cuda-naive.cuh"
#include "cuckoo-cuda-multi.cuh"
#include <map>
#include <algorithm>
#include <cstdlib>
#include <cmath>
#include <chrono>
#include <ctime>
/** Random input generator. */
static void
gen_rnd_input(uint32_t * const vals, const int n, const uint32_t limit) {
std::map<uint32_t, bool> val_map;
int count = 0;
while (count < n) {
uint32_t val = (rand() % (limit - 1)) + 1;
if (val_map.find(val) != val_map.end())
continue;
val_map[val] = true;
vals[count] = val;
count++;
}
}
/** Dummy kernel. */
__global__ void
incKernel(int * const ptr, const int n) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
ptr[idx] += 1;
}
/**
*
* Main entrance for the performance demo.
*
 * Prerequisites: we assume
 * 1. The value range does not cover EMPTY_CELL (i.e. 0).
 * 2. The value range does not exceed the value-field width.
 * 3. No repeated keys are inserted (so the duplication check is skipped).
 * 4. The table size is a multiple of BUCKET_SIZE.
 * 5. We only insert into an empty table; no updating (otherwise the rehashing part would need to be rewritten).
*
* Currently supported types:
* uint[8, 16, 32]_t
*
*/
int
main(void) {
// Random seed.
srand(time(NULL));
// Experiment 0.
std::cout << "Experiment 0 (Time for malloc & kernel invoke) -->" << std::endl;
{
int size = 0x1 << 25;
int table_size = size * 3;
int *ptr = new int[table_size]();
int *d_ptr;
auto ts_malloc = std::chrono::high_resolution_clock::now();
hipMalloc((void **) &d_ptr, table_size * sizeof(int));
auto ts_memcpy = std::chrono::high_resolution_clock::now();
hipMemcpy(d_ptr, ptr, table_size * sizeof(int), hipMemcpyHostToDevice);
auto ts_kernel = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( incKernel), dim3(ceil((float) table_size / 256)), dim3(256), 0, 0, d_ptr, table_size);
auto ts_cpybck = std::chrono::high_resolution_clock::now();
hipMemcpy(ptr, d_ptr, table_size * sizeof(int), hipMemcpyDeviceToHost);
auto te_cpybck = std::chrono::high_resolution_clock::now();
std::cout << " [Malloc]: " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(ts_memcpy - ts_malloc).count()
<< " ms" << std::endl
<< " [Memcpy]: " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(ts_kernel - ts_memcpy).count()
<< " ms" << std::endl
<< " [Kernel]: " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(ts_cpybck - ts_kernel).count()
<< " ms" << std::endl
<< " [Cpybck]: " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te_cpybck - ts_cpybck).count()
<< " ms" << std::endl;
hipFree(d_ptr);
}
// Experiment 1 - Insertion time.
std::cout << "Experiment 1 (Insertion time) -->" << std::endl;
{
int num_funcs = 3, n = 0x1 << 24, size = 2 * n;
uint32_t *vals_to_insert = new uint32_t[n];
gen_rnd_input(vals_to_insert, n, 0x1 << 30);
for (int rep = 0; rep < 5; ++rep) {
std::cout << " rep " << rep << ": " << std::flush;
// CUDA naive.
{
CuckooHashTableCuda_Naive<uint32_t> hash_table(size, 4 * ceil(log2((double) n)),
num_funcs);
auto ts = std::chrono::high_resolution_clock::now();
int levels = hash_table.insert_vals(vals_to_insert, n, 0);
auto te = std::chrono::high_resolution_clock::now();
std::cout << "[Naive] " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te - ts).count()
<< " ms - ";
if (levels == ERR_DEPTH)
std::cout << "exceeds " << MAX_DEPTH << " levels | " << std::flush;
else
std::cout << std::setw(2) << levels << " rehash(es) | " << std::flush;
}
// CUDA multi-level.
{
CuckooHashTableCuda_Multi<uint32_t> hash_table(size, 4 * ceil(log2((double) n)),
num_funcs);
auto ts = std::chrono::high_resolution_clock::now();
int levels = hash_table.insert_vals(vals_to_insert, n);
auto te = std::chrono::high_resolution_clock::now();
std::cout << "[Multi] " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te - ts).count()
<< " ms - ";
if (levels == ERR_DEPTH)
std::cout << "exceeds " << MAX_DEPTH << " levels" << std::endl;
else
std::cout << std::setw(2) << levels << " rehash(es)" << std::endl;
}
}
delete[] vals_to_insert;
}
// Experiment 2 - Lookup time.
std::cout << "Experiment 2 (Lookup time) -->" << std::endl;
{
int num_funcs = 3, size = 0x1 << 25, n = 0x1 << 24, percent = 5;
uint32_t *vals_to_insert = new uint32_t[n];
gen_rnd_input(vals_to_insert, n, 0x1 << 30);
uint32_t *vals_to_lookup = new uint32_t[n];
bool *results = new bool[n];
int bound = ceil((1 - 0.1 * percent) * n);
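// With percent = 5 this gives bound = ceil(0.5 * n): indices below bound are filled with
// keys known to be in the table, the rest with random values that are very likely absent.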
for (int rep = 0; rep < 5; ++rep) {
for (int i = 0; i < bound; ++i)
vals_to_lookup[i] = vals_to_insert[rand() % n];
for (int i = bound; i < n; ++i)
vals_to_lookup[i] = (rand() % ((0x1 << 30) - 1)) + 1;
std::cout << " rep " << rep << ": " << std::flush;
// CUDA naive.
{
CuckooHashTableCuda_Naive<uint32_t> hash_table(size, 4 * ceil(log2((double) n)),
num_funcs);
hash_table.insert_vals(vals_to_insert, n, 0);
auto ts = std::chrono::high_resolution_clock::now();
hash_table.lookup_vals(vals_to_lookup, results, n);
auto te = std::chrono::high_resolution_clock::now();
std::cout << "[Naive] " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te - ts).count()
<< " ms | " << std::flush;
}
// CUDA multi-level.
{
CuckooHashTableCuda_Multi<uint32_t> hash_table(size, 4 * ceil(log2((double) n)),
num_funcs);
hash_table.insert_vals(vals_to_insert, n);
auto ts = std::chrono::high_resolution_clock::now();
hash_table.lookup_vals(vals_to_lookup, results, n);
auto te = std::chrono::high_resolution_clock::now();
std::cout << "[Multi] " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te - ts).count()
<< " ms" << std::endl;
}
}
delete[] vals_to_insert;
delete[] vals_to_lookup;
delete[] results;
}
}
|
350c0205df9bbfddd72be7082ba5c34ab8694f61.cu
|
#include "cuckoo-cuda-naive.cuh"
#include "cuckoo-cuda-multi.cuh"
#include <map>
#include <algorithm>
#include <cstdlib>
#include <cmath>
#include <chrono>
#include <ctime>
/** Random input generator. */
static void
gen_rnd_input(uint32_t * const vals, const int n, const uint32_t limit) {
std::map<uint32_t, bool> val_map;
int count = 0;
while (count < n) {
uint32_t val = (rand() % (limit - 1)) + 1;
if (val_map.find(val) != val_map.end())
continue;
val_map[val] = true;
vals[count] = val;
count++;
}
}
/** Dummy kernel. */
__global__ void
incKernel(int * const ptr, const int n) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
ptr[idx] += 1;
}
/**
*
* Main entrance for the performance demo.
*
 * Prerequisites: we assume
 * 1. The value range does not cover EMPTY_CELL (i.e. 0).
 * 2. The value range does not exceed the value-field width.
 * 3. No repeated keys are inserted (so the duplication check is skipped).
 * 4. The table size is a multiple of BUCKET_SIZE.
 * 5. We only insert into an empty table; no updating (otherwise the rehashing part would need to be rewritten).
*
* Currently supported types:
* uint[8, 16, 32]_t
*
*/
int
main(void) {
// Random seed.
srand(time(NULL));
// Experiment 0.
std::cout << "Experiment 0 (Time for malloc & kernel invoke) -->" << std::endl;
{
int size = 0x1 << 25;
int table_size = size * 3;
int *ptr = new int[table_size]();
int *d_ptr;
auto ts_malloc = std::chrono::high_resolution_clock::now();
cudaMalloc((void **) &d_ptr, table_size * sizeof(int));
auto ts_memcpy = std::chrono::high_resolution_clock::now();
cudaMemcpy(d_ptr, ptr, table_size * sizeof(int), cudaMemcpyHostToDevice);
auto ts_kernel = std::chrono::high_resolution_clock::now();
incKernel<<<ceil((float) table_size / 256), 256>>>(d_ptr, table_size);
auto ts_cpybck = std::chrono::high_resolution_clock::now();
cudaMemcpy(ptr, d_ptr, table_size * sizeof(int), cudaMemcpyDeviceToHost);
auto te_cpybck = std::chrono::high_resolution_clock::now();
std::cout << " [Malloc]: " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(ts_memcpy - ts_malloc).count()
<< " ms" << std::endl
<< " [Memcpy]: " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(ts_kernel - ts_memcpy).count()
<< " ms" << std::endl
<< " [Kernel]: " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(ts_cpybck - ts_kernel).count()
<< " ms" << std::endl
<< " [Cpybck]: " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te_cpybck - ts_cpybck).count()
<< " ms" << std::endl;
cudaFree(d_ptr);
}
// Experiment 1 - Insertion time.
std::cout << "Experiment 1 (Insertion time) -->" << std::endl;
{
int num_funcs = 3, n = 0x1 << 24, size = 2 * n;
uint32_t *vals_to_insert = new uint32_t[n];
gen_rnd_input(vals_to_insert, n, 0x1 << 30);
for (int rep = 0; rep < 5; ++rep) {
std::cout << " rep " << rep << ": " << std::flush;
// CUDA naive.
{
CuckooHashTableCuda_Naive<uint32_t> hash_table(size, 4 * ceil(log2((double) n)),
num_funcs);
auto ts = std::chrono::high_resolution_clock::now();
int levels = hash_table.insert_vals(vals_to_insert, n, 0);
auto te = std::chrono::high_resolution_clock::now();
std::cout << "[Naive] " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te - ts).count()
<< " ms - ";
if (levels == ERR_DEPTH)
std::cout << "exceeds " << MAX_DEPTH << " levels | " << std::flush;
else
std::cout << std::setw(2) << levels << " rehash(es) | " << std::flush;
}
// CUDA multi-level.
{
CuckooHashTableCuda_Multi<uint32_t> hash_table(size, 4 * ceil(log2((double) n)),
num_funcs);
auto ts = std::chrono::high_resolution_clock::now();
int levels = hash_table.insert_vals(vals_to_insert, n);
auto te = std::chrono::high_resolution_clock::now();
std::cout << "[Multi] " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te - ts).count()
<< " ms - ";
if (levels == ERR_DEPTH)
std::cout << "exceeds " << MAX_DEPTH << " levels" << std::endl;
else
std::cout << std::setw(2) << levels << " rehash(es)" << std::endl;
}
}
delete[] vals_to_insert;
}
// Experiment 2 - Lookup time.
std::cout << "Experiment 2 (Lookup time) -->" << std::endl;
{
int num_funcs = 3, size = 0x1 << 25, n = 0x1 << 24, percent = 5;
uint32_t *vals_to_insert = new uint32_t[n];
gen_rnd_input(vals_to_insert, n, 0x1 << 30);
uint32_t *vals_to_lookup = new uint32_t[n];
bool *results = new bool[n];
int bound = ceil((1 - 0.1 * percent) * n);
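// With percent = 5 this gives bound = ceil(0.5 * n): indices below bound are filled with
// keys known to be in the table, the rest with random values that are very likely absent.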
for (int rep = 0; rep < 5; ++rep) {
for (int i = 0; i < bound; ++i)
vals_to_lookup[i] = vals_to_insert[rand() % n];
for (int i = bound; i < n; ++i)
vals_to_lookup[i] = (rand() % ((0x1 << 30) - 1)) + 1;
std::cout << " rep " << rep << ": " << std::flush;
// CUDA naive.
{
CuckooHashTableCuda_Naive<uint32_t> hash_table(size, 4 * ceil(log2((double) n)),
num_funcs);
hash_table.insert_vals(vals_to_insert, n, 0);
auto ts = std::chrono::high_resolution_clock::now();
hash_table.lookup_vals(vals_to_lookup, results, n);
auto te = std::chrono::high_resolution_clock::now();
std::cout << "[Naive] " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te - ts).count()
<< " ms | " << std::flush;
}
// CUDA multi-level.
{
CuckooHashTableCuda_Multi<uint32_t> hash_table(size, 4 * ceil(log2((double) n)),
num_funcs);
hash_table.insert_vals(vals_to_insert, n);
auto ts = std::chrono::high_resolution_clock::now();
hash_table.lookup_vals(vals_to_lookup, results, n);
auto te = std::chrono::high_resolution_clock::now();
std::cout << "[Multi] " << std::setw(5)
<< std::chrono::duration_cast<std::chrono::milliseconds>(te - ts).count()
<< " ms" << std::endl;
}
}
delete[] vals_to_insert;
delete[] vals_to_lookup;
delete[] results;
}
}
|
a076fbe8e2fa2f09cdff694da09f24f60870e68f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
#include <stdio.h>
// You can use these to set the x- and y- dimensions of a block.
// Your choice for these should impact how you divide a grid into blocks.
#define BLOCK_X 1
#define BLOCK_Y 1
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
const dim3 blockDim(BLOCK_X, BLOCK_Y);
const int blockRow = ?;
const int blockCol = ?;
const int row = ?;
const int col = ?;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//Note: h_rgbaImage is passed in only so that you can run serial calculations if you wish.
//The d_rgbaImage data has already been copied to the device (in HW1.cpp), and d_greyImage will be
//copied back (in main.cpp)
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const int numBlockRows = ?;
const int numBlockCols = ?;
const dim3 blockDim(BLOCK_X, BLOCK_Y);
const dim3 gridDim( 1, 1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridDim), dim3(blockDim), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
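// --- Hedged sketch, not part of the original assignment template ---
// One possible way the kernel above could be completed, assuming a row-major image
// layout and a 2D launch whose grid covers numRows x numCols. The kernel name and the
// bounds check are additions for illustration only.
__global__
void rgba_to_greyscale_sketch(const uchar4* const rgbaImage,
                              unsigned char* const greyImage,
                              int numRows, int numCols)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < numRows && col < numCols) {
        const int idx = row * numCols + col;
        const uchar4 px = rgbaImage[idx];
        greyImage[idx] = (unsigned char)(.299f * px.x + .587f * px.y + .114f * px.z);
    }
}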
|
a076fbe8e2fa2f09cdff694da09f24f60870e68f.cu
|
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
#include <stdio.h>
// You can use these to set the x- and y- dimensions of a block.
// Your choice for these should impact how you divide a grid into blocks.
#define BLOCK_X 1
#define BLOCK_Y 1
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
const dim3 blockDim(BLOCK_X, BLOCK_Y);
const int blockRow = ?;
const int blockCol = ?;
const int row = ?;
const int col = ?;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//Note: h_rgbaImage is passed in only so that you can run serial calculations if you wish.
//The d_rgbaImage data has already been copied to the device (in HW1.cpp), and d_greyImage will be
//copied back (in main.cpp)
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const int numBlockRows = ?;
const int numBlockCols = ?;
const dim3 blockDim(BLOCK_X, BLOCK_Y);
const dim3 gridDim( 1, 1, 1); //TODO
rgba_to_greyscale<<<gridDim, blockDim>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
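// --- Hedged sketch, not part of the original assignment template ---
// One possible way the kernel above could be completed, assuming a row-major image
// layout and a 2D launch whose grid covers numRows x numCols. The kernel name and the
// bounds check are additions for illustration only.
__global__
void rgba_to_greyscale_sketch(const uchar4* const rgbaImage,
                              unsigned char* const greyImage,
                              int numRows, int numCols)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < numRows && col < numCols) {
        const int idx = row * numCols + col;
        const uchar4 px = rgbaImage[idx];
        greyImage[idx] = (unsigned char)(.299f * px.x + .587f * px.y + .114f * px.z);
    }
}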
|
470c1ceef1c1408c81b8c28de796c458cb9c35b9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* CUDA Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandelbrot maxiter
* where
* maxiter denotes the maximum number of iterations at each point
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters.
*
*
* Code originally obtained from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
* Reformatted and revised by B. Massingill and C. Parrot and C.Schuller
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#include <cutil.h>
#include <hip/hip_runtime.h>
//#ifdef WITH_DISPLAY
#include "mandelbrot-gui.h" /* has setup(), interact() */
//#endif
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 600 /* size of display window in pixels */
/* Structure definition for complex numbers */
typedef struct {
float r, i;
} complex ;
/* Shorthand for some commonly-used types */
typedef unsigned int uint;
typedef unsigned long ulong;
/* Pixel description */
typedef struct {
uint col, row;
ulong couleur;
} Pixel ;
__global__ void mandelbrot_gpu(ulong *vect_d, ulong max_color, ulong min_color, float scale_r, float scale_i, float scale_color, int maxiter) {
complex z, c;
int col;
int row;
int k;
float r_min = -N;
float i_min = -N;
float lengthsq, temp;
ulong couleur;
// Parameterize the operation with threadIdx.x;
col = threadIdx.x + (blockIdx.x * blockDim.x);
row = threadIdx.y + (blockIdx.y * blockDim.y);
z.r = z.i = 0;
/* Scale display coordinates to actual region */
c.r = r_min + ((float) col * scale_r);
c.i = i_min + ((float) (NPIXELS-1-row) * scale_i);
/* Calculate z0, z1, .... until divergence or maximum iterations */
k = 0;
do {
temp = z.r*z.r - z.i*z.i + c.r;
z.i = 2*z.r*z.i + c.i;
z.r = temp;
lengthsq = z.r*z.r + z.i*z.i;
++k;
} while (lengthsq < (N*N) && k < maxiter);
/* Scale color and display point */
couleur = (ulong) ((k-1) * scale_color) + min_color;
vect_d[col+row*NPIXELS]=couleur;
}
////////////////////////////////////////////////////////////////////////////////
// Main program
/////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[]) {
uint maxiter;
float r_min = -N;
float r_max = N;
float i_min = -N;
float i_max = N;
uint width = NPIXELS; /* dimensions of display window */
uint height = NPIXELS;
Display *display;
Window win;
GC gc;
int setup_return;
ulong min_color, max_color;
float scale_r, scale_i, scale_color;
uint k;
ulong * vect_h;
ulong * vect_d;
unsigned int timer;
int nbblockligne, nbblockcolonne;
/* Check command-line arguments */
if (argc < 2) {
fprintf(stderr, "usage: %s maxiter \n", argv[0]);
return EXIT_FAILURE;
}
vect_h = (ulong *) malloc(sizeof(ulong) * NPIXELS * NPIXELS);
/* Allocate memory on device */
CUDA_SAFE_CALL(hipMalloc((void**) &vect_d, sizeof(ulong) * NPIXELS * NPIXELS));
//printf("Debut du programme\n");
/* Process command-line arguments */
maxiter = atoi(argv[1]);
#ifdef WITH_DISPLAY
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
}
#else
min_color=0;
max_color=16777215;
#endif
/* Calculate and draw points */
/* Compute factors to scale computational region to window */
scale_r = (float) (r_max - r_min) / (float) width;
scale_i = (float) (i_max - i_min) / (float) height;
/* Compute factor for color scaling */
scale_color = (float) (max_color - min_color) / (float) (maxiter - 1);
/* Computation */
if ((NPIXELS % 200) ==0)
nbblockligne = NPIXELS/200;
else
nbblockligne = (NPIXELS/200)+1;
if ((NPIXELS % 2) ==0)
nbblockcolonne = NPIXELS/2;
else
nbblockcolonne = (NPIXELS/2)+1;
dim3 nbblockbygrid(nbblockligne, nbblockcolonne);
dim3 nbthreadbyblock(200,2);
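// 200x2 = 400 threads per block; with NPIXELS = 600 the grid is 3 x 300 blocks,
// so every pixel of the 600 x 600 image is covered by exactly one thread.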
cutCreateTimer(&timer);
cutStartTimer(timer);
//mandelbrot_gpu(ulong vect_d[][], ulong maxcolor, ulong mincolor, float scale_r, float scale_i, float scale_color, int maxiter)
hipLaunchKernelGGL(( mandelbrot_gpu) , dim3(nbblockbygrid), dim3(nbthreadbyblock), 0, 0, vect_d, max_color, min_color, scale_r, scale_i, scale_color, maxiter);
CUDA_SAFE_CALL(hipDeviceSynchronize());
cutStopTimer(timer);
printf("%f\n",cutGetTimerValue(timer));
cutDeleteTimer(timer);
CUDA_SAFE_CALL(hipMemcpy((void*)vect_h, (void*) vect_d, sizeof(ulong) * NPIXELS * NPIXELS, hipMemcpyDeviceToHost));
//printf("Fin du calcul des pixels GPU\n");
#ifdef WITH_DISPLAY
//printf("Debut affichage\n");
for (k=0; k<(NPIXELS*NPIXELS); k++)
if (setup_return == EXIT_SUCCESS) {
XSetForeground (display, gc, vect_h[k]);
XDrawPoint (display, win, gc, k%NPIXELS, k/NPIXELS);
}
#endif
//printf("Fin affichage\n");
free(vect_h);
hipFree(vect_d);
//printf("Fin attente\n");
return EXIT_SUCCESS;
}
|
470c1ceef1c1408c81b8c28de796c458cb9c35b9.cu
|
/*
* CUDA Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandelbrot maxiter
* where
* maxiter denotes the maximum number of iterations at each point
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters.
*
*
* Code originally obtained from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
* Reformatted and revised by B. Massingill and C. Parrot and C.Schuller
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#include <cutil.h>
#include <cuda.h>
//#ifdef WITH_DISPLAY
#include "mandelbrot-gui.h" /* has setup(), interact() */
//#endif
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 600 /* size of display window in pixels */
/* Structure definition for complex numbers */
typedef struct {
float r, i;
} complex ;
/* Shorthand for some commonly-used types */
typedef unsigned int uint;
typedef unsigned long ulong;
/* Pixel description */
typedef struct {
uint col, row;
ulong couleur;
} Pixel ;
__global__ void mandelbrot_gpu(ulong *vect_d, ulong max_color, ulong min_color, float scale_r, float scale_i, float scale_color, int maxiter) {
complex z, c;
int col;
int row;
int k;
float r_min = -N;
float i_min = -N;
float lengthsq, temp;
ulong couleur;
// Parameterize the operation with threadIdx.x;
col = threadIdx.x + (blockIdx.x * blockDim.x);
row = threadIdx.y + (blockIdx.y * blockDim.y);
z.r = z.i = 0;
/* Scale display coordinates to actual region */
c.r = r_min + ((float) col * scale_r);
c.i = i_min + ((float) (NPIXELS-1-row) * scale_i);
/* Calculate z0, z1, .... until divergence or maximum iterations */
k = 0;
do {
temp = z.r*z.r - z.i*z.i + c.r;
z.i = 2*z.r*z.i + c.i;
z.r = temp;
lengthsq = z.r*z.r + z.i*z.i;
++k;
} while (lengthsq < (N*N) && k < maxiter);
/* Scale color and display point */
couleur = (ulong) ((k-1) * scale_color) + min_color;
vect_d[col+row*NPIXELS]=couleur;
}
////////////////////////////////////////////////////////////////////////////////
// Main program
/////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[]) {
uint maxiter;
float r_min = -N;
float r_max = N;
float i_min = -N;
float i_max = N;
uint width = NPIXELS; /* dimensions of display window */
uint height = NPIXELS;
Display *display;
Window win;
GC gc;
int setup_return;
ulong min_color, max_color;
float scale_r, scale_i, scale_color;
uint k;
ulong * vect_h;
ulong * vect_d;
unsigned int timer;
int nbblockligne, nbblockcolonne;
/* Check command-line arguments */
if (argc < 2) {
fprintf(stderr, "usage: %s maxiter \n", argv[0]);
return EXIT_FAILURE;
}
vect_h = (ulong *) malloc(sizeof(ulong) * NPIXELS * NPIXELS);
/* Alocate memory on device */
CUDA_SAFE_CALL(cudaMalloc((void**) &vect_d, sizeof(ulong) * NPIXELS * NPIXELS));
//printf("Debut du programme\n");
/* Process command-line arguments */
maxiter = atoi(argv[1]);
#ifdef WITH_DISPLAY
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
}
#else
min_color=0;
max_color=16777215;
#endif
/* Calculate and draw points */
/* Compute factors to scale computational region to window */
scale_r = (float) (r_max - r_min) / (float) width;
scale_i = (float) (i_max - i_min) / (float) height;
/* Compute factor for color scaling */
scale_color = (float) (max_color - min_color) / (float) (maxiter - 1);
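/* Worked example (maxiter = 256 is a hypothetical input): for the default 600-pixel
window over [-2, 2], scale_r = scale_i = 4.0 / 600 ~ 0.00667 units per pixel; with the
24-bit colour range 0..16777215, scale_color = 16777215 / 255 = 65793 per iteration. */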
/* Computation */
if ((NPIXELS % 200) ==0)
nbblockligne = NPIXELS/200;
else
nbblockligne = (NPIXELS/200)+1;
if ((NPIXELS % 2) ==0)
nbblockcolonne = NPIXELS/2;
else
nbblockcolonne = (NPIXELS/2)+1;
dim3 nbblockbygrid(nbblockligne, nbblockcolonne);
dim3 nbthreadbyblock(200,2);
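/* With NPIXELS = 600 this launch covers the image with a 3 x 300 grid of
200 x 2 = 400-thread blocks, i.e. one thread per pixel of the 600 x 600 window. */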
cutCreateTimer(&timer);
cutStartTimer(timer);
//mandelbrot_gpu(ulong *vect_d, ulong max_color, ulong min_color, float scale_r, float scale_i, float scale_color, int maxiter)
mandelbrot_gpu <<<nbblockbygrid, nbthreadbyblock>>> (vect_d, max_color, min_color, scale_r, scale_i, scale_color, maxiter);
CUDA_SAFE_CALL(cudaThreadSynchronize());
cutStopTimer(timer);
printf("%f\n",cutGetTimerValue(timer));
cutDeleteTimer(timer);
CUDA_SAFE_CALL(cudaMemcpy((void*)vect_h, (void*) vect_d, sizeof(ulong) * NPIXELS * NPIXELS, cudaMemcpyDeviceToHost));
//printf("Fin du calcul des pixels GPU\n");
#ifdef WITH_DISPLAY
//printf("Debut affichage\n");
for (k=0; k<(NPIXELS*NPIXELS); k++)
if (setup_return == EXIT_SUCCESS) {
XSetForeground (display, gc, vect_h[k]);
XDrawPoint (display, win, gc, k%NPIXELS, k/NPIXELS);
}
#endif
//printf("Fin affichage\n");
free(vect_h);
cudaFree(vect_d);
//printf("Fin attente\n");
return EXIT_SUCCESS;
}
|
e334f8c855a3fd7d02934fc8a37336ee90eb32c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoGPU.cuh"
#include "hoomd/TextureTools.h"
#include <stdio.h>
namespace hpmc
{
namespace detail
{
/*! \file IntegratorHPMCMonoGPU.cu
\brief Definition of CUDA kernels and drivers for IntegratorHPMCMono
*/
//! Kernel to generate expanded cells
/*! \param d_excell_idx Output array to list the particle indices in the expanded cells
\param d_excell_size Output array to list the number of particles in each expanded cell
\param excli Indexer for the expanded cells
\param d_cell_idx Particle indices in the normal cells
\param d_cell_size Number of particles in each cell
\param d_cell_adj Cell adjacency list
\param ci Cell indexer
\param cli Cell list indexer
\param cadji Cell adjacency indexer
gpu_hpmc_excell_kernel executes one thread per cell. It gathers the particle indices from all neighboring cells
into the output expanded cell.
*/
__global__ void gpu_hpmc_excell_kernel(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji)
{
// compute the output cell
unsigned int my_cell = 0;
if (gridDim.y > 1)
{
// if gridDim.y > 1, then the fermi workaround is in place, index blocks on a 2D grid
my_cell = (blockIdx.x + blockIdx.y * 65535) * blockDim.x + threadIdx.x;
}
else
{
my_cell = blockDim.x * blockIdx.x + threadIdx.x;
}
if (my_cell >= ci.getNumElements())
return;
unsigned int my_cell_size = 0;
// loop over neighboring cells and build up the expanded cell list
for (unsigned int offset = 0; offset < cadji.getW(); offset++)
{
unsigned int neigh_cell = d_cell_adj[cadji(offset, my_cell)];
unsigned int neigh_cell_size = d_cell_size[neigh_cell];
for (unsigned int k = 0; k < neigh_cell_size; k++)
{
// read in the index of the new particle to add to our cell
unsigned int new_idx = tex1Dfetch(cell_idx_tex, cli(k, neigh_cell));
d_excell_idx[excli(my_cell_size, my_cell)] = new_idx;
my_cell_size++;
}
}
// write out the final size
d_excell_size[my_cell] = my_cell_size;
}
//! Kernel driver for gpu_hpmc_excell_kernel()
hipError_t gpu_hpmc_excell(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D& excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const unsigned int block_size)
{
assert(d_excell_idx);
assert(d_excell_size);
assert(d_cell_idx);
assert(d_cell_size);
assert(d_cell_adj);
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
static int sm = -1;
if (max_block_size == -1)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, gpu_hpmc_excell_kernel);
max_block_size = attr.maxThreadsPerBlock;
sm = attr.binaryVersion;
}
// setup the grid to run the kernel
dim3 threads(min(block_size, (unsigned int)max_block_size), 1, 1);
dim3 grid(ci.getNumElements() / block_size + 1, 1, 1);
// hack to enable grids of more than 65k blocks
if (sm < 30 && grid.x > 65535)
{
grid.y = grid.x / 65535 + 1;
grid.x = 65535;
}
// bind the textures
cell_idx_tex.normalized = false;
cell_idx_tex.filterMode = hipFilterModePoint;
hipError_t error = hipBindTexture(0, cell_idx_tex, d_cell_idx, sizeof(unsigned int)*cli.getNumElements());
if (error != hipSuccess)
return error;
hipLaunchKernelGGL(( gpu_hpmc_excell_kernel), dim3(grid), dim3(threads), 0, 0, d_excell_idx,
d_excell_size,
excli,
d_cell_idx,
d_cell_size,
d_cell_adj,
ci,
cli,
cadji);
return hipSuccess;
}
//! Kernel for grid shift
/*! \param d_postype postype of each particle
\param d_image Image flags for each particle
\param N number of particles
\param box Simulation box
\param shift Vector by which to translate the particles
Shift all the particles by a given vector.
\ingroup hpmc_kernels
*/
__global__ void gpu_hpmc_shift_kernel(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim box,
const Scalar3 shift)
{
// identify the active cell that this thread handles
unsigned int my_pidx = blockIdx.x * blockDim.x + threadIdx.x;
// this thread is inactive if it indexes past the end of the particle list
if (my_pidx >= N)
return;
// pull in the current position
Scalar4 postype = d_postype[my_pidx];
// shift the position
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
pos += shift;
// wrap the particle back into the box
int3 image = d_image[my_pidx];
box.wrap(pos, image);
// write out the new position and orientation
d_postype[my_pidx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[my_pidx] = image;
}
//! Kernel driver for gpu_hpmc_shift_kernel()
hipError_t gpu_hpmc_shift(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim& box,
const Scalar3 shift,
const unsigned int block_size)
{
assert(d_postype);
assert(d_image);
// setup the grid to run the kernel
dim3 threads_shift(block_size, 1, 1);
dim3 grid_shift(N / block_size + 1, 1, 1);
hipLaunchKernelGGL(( gpu_hpmc_shift_kernel), dim3(grid_shift), dim3(threads_shift), 0, 0, d_postype,
d_image,
N,
box,
shift);
// after this kernel we return control of cuda managed memory to the host
hipDeviceSynchronize();
return hipSuccess;
}
}; // end namespace detail
} // end namespace hpmc
|
e334f8c855a3fd7d02934fc8a37336ee90eb32c9.cu
|
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoGPU.cuh"
#include "hoomd/TextureTools.h"
#include <stdio.h>
namespace hpmc
{
namespace detail
{
/*! \file IntegratorHPMCMonoGPU.cu
\brief Definition of CUDA kernels and drivers for IntegratorHPMCMono
*/
//! Kernel to generate expanded cells
/*! \param d_excell_idx Output array to list the particle indices in the expanded cells
\param d_excell_size Output array to list the number of particles in each expanded cell
\param excli Indexer for the expanded cells
\param d_cell_idx Particle indices in the normal cells
\param d_cell_size Number of particles in each cell
\param d_cell_adj Cell adjacency list
\param ci Cell indexer
\param cli Cell list indexer
\param cadji Cell adjacency indexer
gpu_hpmc_excell_kernel executes one thread per cell. It gathers the particle indices from all neighboring cells
into the output expanded cell.
*/
__global__ void gpu_hpmc_excell_kernel(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji)
{
// compute the output cell
unsigned int my_cell = 0;
if (gridDim.y > 1)
{
// if gridDim.y > 1, then the fermi workaround is in place, index blocks on a 2D grid
my_cell = (blockIdx.x + blockIdx.y * 65535) * blockDim.x + threadIdx.x;
}
else
{
my_cell = blockDim.x * blockIdx.x + threadIdx.x;
}
if (my_cell >= ci.getNumElements())
return;
unsigned int my_cell_size = 0;
// loop over neighboring cells and build up the expanded cell list
for (unsigned int offset = 0; offset < cadji.getW(); offset++)
{
unsigned int neigh_cell = d_cell_adj[cadji(offset, my_cell)];
unsigned int neigh_cell_size = d_cell_size[neigh_cell];
for (unsigned int k = 0; k < neigh_cell_size; k++)
{
// read in the index of the new particle to add to our cell
unsigned int new_idx = tex1Dfetch(cell_idx_tex, cli(k, neigh_cell));
d_excell_idx[excli(my_cell_size, my_cell)] = new_idx;
my_cell_size++;
}
}
// write out the final size
d_excell_size[my_cell] = my_cell_size;
}
//! Kernel driver for gpu_hpmc_excell_kernel()
cudaError_t gpu_hpmc_excell(unsigned int *d_excell_idx,
unsigned int *d_excell_size,
const Index2D& excli,
const unsigned int *d_cell_idx,
const unsigned int *d_cell_size,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const unsigned int block_size)
{
assert(d_excell_idx);
assert(d_excell_size);
assert(d_cell_idx);
assert(d_cell_size);
assert(d_cell_adj);
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
static int sm = -1;
if (max_block_size == -1)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, gpu_hpmc_excell_kernel);
max_block_size = attr.maxThreadsPerBlock;
sm = attr.binaryVersion;
}
// setup the grid to run the kernel
dim3 threads(min(block_size, (unsigned int)max_block_size), 1, 1);
dim3 grid(ci.getNumElements() / block_size + 1, 1, 1);
// hack to enable grids of more than 65k blocks
if (sm < 30 && grid.x > 65535)
{
grid.y = grid.x / 65535 + 1;
grid.x = 65535;
}
// bind the textures
cell_idx_tex.normalized = false;
cell_idx_tex.filterMode = cudaFilterModePoint;
cudaError_t error = cudaBindTexture(0, cell_idx_tex, d_cell_idx, sizeof(unsigned int)*cli.getNumElements());
if (error != cudaSuccess)
return error;
gpu_hpmc_excell_kernel<<<grid, threads>>>(d_excell_idx,
d_excell_size,
excli,
d_cell_idx,
d_cell_size,
d_cell_adj,
ci,
cli,
cadji);
return cudaSuccess;
}
//! Kernel for grid shift
/*! \param d_postype postype of each particle
\param d_image Image flags for each particle
\param N number of particles
\param box Simulation box
\param shift Vector by which to translate the particles
Shift all the particles by a given vector.
\ingroup hpmc_kernels
*/
__global__ void gpu_hpmc_shift_kernel(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim box,
const Scalar3 shift)
{
// identify the active cell that this thread handles
unsigned int my_pidx = blockIdx.x * blockDim.x + threadIdx.x;
// this thread is inactive if it indexes past the end of the particle list
if (my_pidx >= N)
return;
// pull in the current position
Scalar4 postype = d_postype[my_pidx];
// shift the position
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
pos += shift;
// wrap the particle back into the box
int3 image = d_image[my_pidx];
box.wrap(pos, image);
// write out the new position and orientation
d_postype[my_pidx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[my_pidx] = image;
}
//! Kernel driver for gpu_hpmc_shift_kernel()
cudaError_t gpu_hpmc_shift(Scalar4 *d_postype,
int3 *d_image,
const unsigned int N,
const BoxDim& box,
const Scalar3 shift,
const unsigned int block_size)
{
assert(d_postype);
assert(d_image);
// setup the grid to run the kernel
dim3 threads_shift(block_size, 1, 1);
dim3 grid_shift(N / block_size + 1, 1, 1);
gpu_hpmc_shift_kernel<<<grid_shift, threads_shift>>>(d_postype,
d_image,
N,
box,
shift);
// after this kernel we return control of cuda managed memory to the host
cudaDeviceSynchronize();
return cudaSuccess;
}
}; // end namespace detail
} // end namespace hpmc
|
3a495ca68f166558fe8691757491c7ff0c693069.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 10000000
#define MAX_ERR 1e-6
__global__ void vector_add(float *out, float *a, float *b, int n) {
int index = threadIdx.x; // Index of the current thread in the block
int stride = blockDim.x; // Number of threads in the current block
for (int i = index; i < n; i += stride) {
out[i] = a[i] + b[i];
}
}
int main() {
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// Allocate host memory
a = (float *)malloc(sizeof(float) * N);
b = (float *)malloc(sizeof(float) * N);
out = (float *)malloc(sizeof(float) * N);
// Initialize host arrays
for (int i = 0; i < N; i++) {
a[i] = 1.0f;
b[i] = 2.0f;
}
// Allocate device memory
hipMalloc((void **)&d_a, sizeof(float) * N);
hipMalloc((void **)&d_b, sizeof(float) * N);
hipMalloc((void **)&d_out, sizeof(float) * N);
// Transfer data from host to device memory
hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
// Executing kernel
hipLaunchKernelGGL(( vector_add), dim3(1), dim3(256), 0, 0, d_out, d_a, d_b, N);
// Transfer data back to host memory
hipMemcpy(out, d_out, sizeof(float) * N, hipMemcpyDeviceToHost);
// Verification
for (int i = 0; i < N; i++) {
assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
}
printf("out[0] = %f\n", out[0]);
printf("PASSED\n");
// Deallocate device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
}
|
3a495ca68f166558fe8691757491c7ff0c693069.cu
|
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 10000000
#define MAX_ERR 1e-6
__global__ void vector_add(float *out, float *a, float *b, int n) {
int index = threadIdx.x; // Index of the current thread in the block
int stride = blockDim.x; // Number of threads in the current block
for (int i = index; i < n; i += stride) {
out[i] = a[i] + b[i];
}
}
int main() {
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// Allocate host memory
a = (float *)malloc(sizeof(float) * N);
b = (float *)malloc(sizeof(float) * N);
out = (float *)malloc(sizeof(float) * N);
// Initialize host arrays
for (int i = 0; i < N; i++) {
a[i] = 1.0f;
b[i] = 2.0f;
}
// Allocate device memory
cudaMalloc((void **)&d_a, sizeof(float) * N);
cudaMalloc((void **)&d_b, sizeof(float) * N);
cudaMalloc((void **)&d_out, sizeof(float) * N);
// Transfer data from host to device memory
cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
// Executing kernel
vector_add<<<1, 256>>>(d_out, d_a, d_b, N);
// Transfer data back to host memory
cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
// Verification
for (int i = 0; i < N; i++) {
assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
}
printf("out[0] = %f\n", out[0]);
printf("PASSED\n");
// Deallocate device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
}
|
3a5ffb43559911503e713af5be54dc43a59fbf81.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <ops/declarable/helpers/d_t_s.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void depthToSpaceKernel(void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, const int block_size, const bool isNHWC) {
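// depth-to-space rearranges groups of block_size*block_size channels into
// block_size x block_size spatial blocks; the NHWC branch below assigns one output
// element per loop iteration, the NCHW branch assigns one input element per iteration.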
T *input_ptr = reinterpret_cast<T *>(vx);
T *output_ptr = reinterpret_cast<T *>(vz);
const int batch_size = shape::sizeAt(xShapeInfo, 0);
const int input_depth = isNHWC ? shape::sizeAt(xShapeInfo, 3) : shape::sizeAt(xShapeInfo, 1);
const int input_height = isNHWC ? shape::sizeAt(xShapeInfo, 1) : shape::sizeAt(xShapeInfo, 2);
const int input_width = isNHWC ? shape::sizeAt(xShapeInfo, 2) : shape::sizeAt(xShapeInfo, 3);
const int output_depth = isNHWC ? shape::sizeAt(zShapeInfo, 3) : shape::sizeAt(zShapeInfo, 1);
const int output_height = isNHWC ? shape::sizeAt(zShapeInfo, 1) : shape::sizeAt(zShapeInfo, 2);
const int output_width = isNHWC ? shape::sizeAt(zShapeInfo, 2) : shape::sizeAt(zShapeInfo, 3);
const int input_area = input_width * input_height;
const int input_depth_by_input_area = input_depth * input_area;
const int output_depth_by_input_height = output_depth * input_height;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (isNHWC) {
const int total_count = batch_size * output_height * output_width * output_depth;
for (int out_idx = tid; out_idx < total_count; out_idx += blockDim.x * gridDim.x) {
const int d = out_idx % output_depth;
const int out_idx2 = out_idx / output_depth;
const int w = out_idx2 % output_width;
const int out_idx3 = out_idx2 / output_width;
const int h = out_idx3 % output_height;
const int b = out_idx3 / output_height;
const int in_h = h / block_size;
const int offset_h = h % block_size;
const int in_w = w / block_size;
const int offset_w = w % block_size;
const int offset_d = (offset_h * block_size + offset_w) * output_depth;
const int in_d = d + offset_d;
const int inp_idx = in_d + input_depth * (in_w + input_width * (in_h + input_height * b));
(output_ptr + out_idx)[0] = (input_ptr + inp_idx)[0];
}
} else {
const int total_count = batch_size * input_depth_by_input_area;
for (int input_idx = tid; input_idx < total_count; input_idx += blockDim.x * gridDim.x) {
const int n_bY_bX_oC_iY = input_idx / input_width;
const int iX = input_idx - n_bY_bX_oC_iY * input_width;
const int n_bY_bX = n_bY_bX_oC_iY / output_depth_by_input_height;
const int oC_iY = n_bY_bX_oC_iY - n_bY_bX * output_depth_by_input_height;
const int n_bY = n_bY_bX / block_size;
const int bX = n_bY_bX - n_bY * block_size;
const int n = n_bY / block_size;
const int bY = n_bY - n * block_size;
const int output_idx = bX + block_size * (iX + input_width * (bY + block_size * (oC_iY + n * output_depth_by_input_height)));
(output_ptr + output_idx)[0] = (input_ptr + input_idx)[0];
}
}
}
template <typename T>
static void __depthToSpace(nd4j::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC) {
hipLaunchKernelGGL(( depthToSpaceKernel<T>), dim3(512), dim3(512), 1024, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), block_size, isNHWC);
}
void _depthToSpace(nd4j::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC) {
auto xType = input->dataType();
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(xType, __depthToSpace, (context, input, output, block_size, isNHWC), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input});
}
BUILD_SINGLE_TEMPLATE(template void __depthToSpace, (nd4j::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC);, LIBND4J_TYPES);
}
}
}
|
3a5ffb43559911503e713af5be54dc43a59fbf81.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <ops/declarable/helpers/d_t_s.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void depthToSpaceKernel(void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, const int block_size, const bool isNHWC) {
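// depth-to-space rearranges groups of block_size*block_size channels into
// block_size x block_size spatial blocks; the NHWC branch below assigns one output
// element per loop iteration, the NCHW branch assigns one input element per iteration.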
T *input_ptr = reinterpret_cast<T *>(vx);
T *output_ptr = reinterpret_cast<T *>(vz);
const int batch_size = shape::sizeAt(xShapeInfo, 0);
const int input_depth = isNHWC ? shape::sizeAt(xShapeInfo, 3) : shape::sizeAt(xShapeInfo, 1);
const int input_height = isNHWC ? shape::sizeAt(xShapeInfo, 1) : shape::sizeAt(xShapeInfo, 2);
const int input_width = isNHWC ? shape::sizeAt(xShapeInfo, 2) : shape::sizeAt(xShapeInfo, 3);
const int output_depth = isNHWC ? shape::sizeAt(zShapeInfo, 3) : shape::sizeAt(zShapeInfo, 1);
const int output_height = isNHWC ? shape::sizeAt(zShapeInfo, 1) : shape::sizeAt(zShapeInfo, 2);
const int output_width = isNHWC ? shape::sizeAt(zShapeInfo, 2) : shape::sizeAt(zShapeInfo, 3);
const int input_area = input_width * input_height;
const int input_depth_by_input_area = input_depth * input_area;
const int output_depth_by_input_height = output_depth * input_height;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (isNHWC) {
const int total_count = batch_size * output_height * output_width * output_depth;
for (int out_idx = tid; out_idx < total_count; out_idx += blockDim.x * gridDim.x) {
const int d = out_idx % output_depth;
const int out_idx2 = out_idx / output_depth;
const int w = out_idx2 % output_width;
const int out_idx3 = out_idx2 / output_width;
const int h = out_idx3 % output_height;
const int b = out_idx3 / output_height;
const int in_h = h / block_size;
const int offset_h = h % block_size;
const int in_w = w / block_size;
const int offset_w = w % block_size;
const int offset_d = (offset_h * block_size + offset_w) * output_depth;
const int in_d = d + offset_d;
const int inp_idx = in_d + input_depth * (in_w + input_width * (in_h + input_height * b));
(output_ptr + out_idx)[0] = (input_ptr + inp_idx)[0];
}
} else {
const int total_count = batch_size * input_depth_by_input_area;
for (int input_idx = tid; input_idx < total_count; input_idx += blockDim.x * gridDim.x) {
const int n_bY_bX_oC_iY = input_idx / input_width;
const int iX = input_idx - n_bY_bX_oC_iY * input_width;
const int n_bY_bX = n_bY_bX_oC_iY / output_depth_by_input_height;
const int oC_iY = n_bY_bX_oC_iY - n_bY_bX * output_depth_by_input_height;
const int n_bY = n_bY_bX / block_size;
const int bX = n_bY_bX - n_bY * block_size;
const int n = n_bY / block_size;
const int bY = n_bY - n * block_size;
const int output_idx = bX + block_size * (iX + input_width * (bY + block_size * (oC_iY + n * output_depth_by_input_height)));
(output_ptr + output_idx)[0] = (input_ptr + input_idx)[0];
}
}
}
template <typename T>
static void __depthToSpace(nd4j::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC) {
depthToSpaceKernel<T><<<512, 512, 1024, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), block_size, isNHWC);
}
void _depthToSpace(nd4j::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC) {
auto xType = input->dataType();
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(xType, __depthToSpace, (context, input, output, block_size, isNHWC), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input});
}
BUILD_SINGLE_TEMPLATE(template void __depthToSpace, (nd4j::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC);, LIBND4J_TYPES);
}
}
}
|
416ba454e858bfa34acc354c7cc6c3c493a0a4f9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "integral.cuh"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/core/gpumat.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void scanCols(uchar* in, float* out, int width, int height, int in_step, int out_step)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id >= width)
return;
const float scale = 1.0f / 255.0f;
uchar* ptr_in = in + id;
float* ptr_out = out + id;
float pre_out = *ptr_in;
pre_out = scale * pre_out;
*ptr_out = pre_out;
for(int i = 1; i < height; i++)
{
ptr_out += out_step;
ptr_in += in_step;
pre_out = pre_out + scale * (*ptr_in);
*ptr_out = pre_out;
}
}
__global__ void scanRows(float* out, int width, int height, int step)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id >= height)
return;
float* ptr_out = out + id * step;
float pre_out = *ptr_out;
for(int i = 1; i < width; i++)
{
ptr_out += 1;
pre_out = pre_out + (*ptr_out);
*ptr_out = pre_out;
}
}
void Integral(const cv::gpu::GpuMat& source, cv::gpu::GpuMat& out)
{
cv::TickMeter tm;
tm.start();
out.create(source.size(), CV_32F);
dim3 blocksize(256);
dim3 gridsize_cols((source.cols + blocksize.x - 1) / blocksize.x);
dim3 gridsize_rows((source.rows + blocksize.x - 1) / blocksize.x);
// one thread per column for the column scan, one thread per row for the row scan
hipLaunchKernelGGL(( scanCols) , dim3(gridsize_cols), dim3(blocksize) , 0, 0, (uchar*) source.data, (float*)out.data,
source.cols, source.rows, source.step, out.step / 4);
hipLaunchKernelGGL(( scanRows) , dim3(gridsize_rows), dim3(blocksize) , 0, 0, (float*)out.data, out.cols, out.rows, out.step / 4);
hipDeviceSynchronize();
tm.stop();
printf("CUDA integral %fms\n",tm.getTimeMilli());
/*//////////////////////////////////check integral////////////////////////////////
cv::Mat test;
out.download(test);
printf("check integral\n");
float* ptr = (float*)test.data;
for(int i = 0; i < 10; i++)
{
printf("%f ",ptr[i * 2]);
}puts("");
/////////////////////////////////////////////////////////////////////////////////*/
}
|
416ba454e858bfa34acc354c7cc6c3c493a0a4f9.cu
|
#include "integral.cuh"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/core/gpumat.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void scanCols(uchar* in, float* out, int width, int height, int in_step, int out_step)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id >= width)
return;
const float scale = 1.0f / 255.0f;
uchar* ptr_in = in + id;
float* ptr_out = out + id;
float pre_out = *ptr_in;
pre_out = scale * pre_out;
*ptr_out = pre_out;
for(int i = 1; i < height; i++)
{
ptr_out += out_step;
ptr_in += in_step;
pre_out = pre_out + scale * (*ptr_in);
*ptr_out = pre_out;
}
}
__global__ void scanRows(float* out, int width, int height, int step)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id >= height)
return;
float* ptr_out = out + id * step;
float pre_out = *ptr_out;
for(int i = 1; i < width; i++)
{
ptr_out += 1;
pre_out = pre_out + (*ptr_out);
*ptr_out = pre_out;
}
}
void Integral(const cv::gpu::GpuMat& source, cv::gpu::GpuMat& out)
{
cv::TickMeter tm;
tm.start();
out.create(source.size(), CV_32F);
dim3 blocksize(256);
dim3 gridsize_cols((source.cols + blocksize.x - 1) / blocksize.x);
dim3 gridsize_rows((source.rows + blocksize.x - 1) / blocksize.x);
// one thread per column for the column scan, one thread per row for the row scan
scanCols <<< gridsize_cols, blocksize >>> ((uchar*) source.data, (float*)out.data,
source.cols, source.rows, source.step, out.step / 4);
scanRows <<< gridsize_rows, blocksize >>> ((float*)out.data, out.cols, out.rows, out.step / 4);
cudaDeviceSynchronize();
tm.stop();
printf("CUDA integral %fms\n",tm.getTimeMilli());
/*//////////////////////////////////check integral////////////////////////////////
cv::Mat test;
out.download(test);
printf("check integral\n");
float* ptr = (float*)test.data;
for(int i = 0; i < 10; i++)
{
printf("%f ",ptr[i * 2]);
}puts("");
/////////////////////////////////////////////////////////////////////////////////*/
}
|
e3719bb471e5087a2ac858df223b5f74157c79c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/nll_kernel_util.h"
#include "oneflow/core/cuda/atomic.cuh"
namespace oneflow {
namespace {
template<typename T, typename K>
__global__ void NLLForward(const int32_t num_samples, const K num_classes, const K class_start,
const K ignore_index, const T* input, const K* target, const T* weight,
T* out, T* out_weight) {
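// One thread per sample: out[i] = -w * input[i][target[i] - class_start] with
// w = weight[target[i] - class_start] (or 1 when no weights are given); both outputs
// are zero when the target equals ignore_index or falls outside this class partition.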
const T zero = GetZeroVal<T>();
const T one = GetOneVal<T>();
CUDA_1D_KERNEL_LOOP(i, num_samples) {
K label = target[i];
T w = zero;
T y = zero;
if (label != ignore_index) {
label -= class_start;
if (label >= 0 && label < num_classes) {
w = weight ? weight[label] : one;
y = -(input[i * num_classes + label] * w);
}
}
out[i] = y;
out_weight[i] = w;
}
}
template<typename T, typename K>
__global__ void NLLBackward(const int32_t num_samples, const K num_classes, const K class_start,
const K ignore_index, const T* out_grad, const K* target,
const T* weight, T* in_grad) {
const T one = GetOneVal<T>();
const T zero = GetZeroVal<T>();
CUDA_1D_KERNEL_LOOP_T(K, i, num_samples * num_classes) {
const K n = i / num_classes;
const K idx = i - n * num_classes;
const K label = target[n];
if (label != ignore_index && idx == label - class_start) {
in_grad[i] = out_grad[n] * (weight ? -weight[idx] : -one);
} else {
in_grad[i] = zero;
}
}
}
} // namespace
template<typename T, typename K>
struct NLLKernelUtil<DeviceType::kCUDA, T, K> {
static void Forward(ep::Stream* stream, const int32_t num_samples, const K num_classes,
const K class_start, const K ignore_index, const T* input, const K* target,
const T* weight, T* out, T* out_weight) {
hipLaunchKernelGGL(( NLLForward), dim3(BlocksNum4ThreadsNum(num_samples)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), num_samples, num_classes,
class_start, ignore_index, input,
target, weight, out, out_weight);
}
static void Backward(ep::Stream* stream, const int32_t num_samples, const K num_classes,
const K class_start, const K ignore_index, const T* out_grad,
const K* target, const T* weight, T* in_grad) {
hipLaunchKernelGGL(( NLLBackward), dim3(BlocksNum4ThreadsNum(num_samples)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
num_samples, num_classes, class_start, ignore_index, out_grad, target, weight, in_grad);
}
};
template struct NLLKernelUtil<DeviceType::kCUDA, float, int32_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, float, int64_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, double, int32_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, double, int64_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, half, int32_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, half, int64_t>;
} // namespace oneflow
|
e3719bb471e5087a2ac858df223b5f74157c79c9.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/nll_kernel_util.h"
#include "oneflow/core/cuda/atomic.cuh"
namespace oneflow {
namespace {
template<typename T, typename K>
__global__ void NLLForward(const int32_t num_samples, const K num_classes, const K class_start,
const K ignore_index, const T* input, const K* target, const T* weight,
T* out, T* out_weight) {
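// One thread per sample: out[i] = -w * input[i][target[i] - class_start] with
// w = weight[target[i] - class_start] (or 1 when no weights are given); both outputs
// are zero when the target equals ignore_index or falls outside this class partition.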
const T zero = GetZeroVal<T>();
const T one = GetOneVal<T>();
CUDA_1D_KERNEL_LOOP(i, num_samples) {
K label = target[i];
T w = zero;
T y = zero;
if (label != ignore_index) {
label -= class_start;
if (label >= 0 && label < num_classes) {
w = weight ? weight[label] : one;
y = -(input[i * num_classes + label] * w);
}
}
out[i] = y;
out_weight[i] = w;
}
}
template<typename T, typename K>
__global__ void NLLBackward(const int32_t num_samples, const K num_classes, const K class_start,
const K ignore_index, const T* out_grad, const K* target,
const T* weight, T* in_grad) {
const T one = GetOneVal<T>();
const T zero = GetZeroVal<T>();
CUDA_1D_KERNEL_LOOP_T(K, i, num_samples * num_classes) {
const K n = i / num_classes;
const K idx = i - n * num_classes;
const K label = target[n];
if (label != ignore_index && idx == label - class_start) {
in_grad[i] = out_grad[n] * (weight ? -weight[idx] : -one);
} else {
in_grad[i] = zero;
}
}
}
} // namespace
template<typename T, typename K>
struct NLLKernelUtil<DeviceType::kCUDA, T, K> {
static void Forward(ep::Stream* stream, const int32_t num_samples, const K num_classes,
const K class_start, const K ignore_index, const T* input, const K* target,
const T* weight, T* out, T* out_weight) {
NLLForward<<<BlocksNum4ThreadsNum(num_samples), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(num_samples, num_classes,
class_start, ignore_index, input,
target, weight, out, out_weight);
}
static void Backward(ep::Stream* stream, const int32_t num_samples, const K num_classes,
const K class_start, const K ignore_index, const T* out_grad,
const K* target, const T* weight, T* in_grad) {
NLLBackward<<<BlocksNum4ThreadsNum(num_samples), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
num_samples, num_classes, class_start, ignore_index, out_grad, target, weight, in_grad);
}
};
template struct NLLKernelUtil<DeviceType::kCUDA, float, int32_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, float, int64_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, double, int32_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, double, int64_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, half, int32_t>;
template struct NLLKernelUtil<DeviceType::kCUDA, half, int64_t>;
} // namespace oneflow
|
894ff096c358d8d5b8e929b1f4cb138ba0fd3c41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void incKernel (float *data) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
data[idx] = data[idx] + 1.0f;
}
int main (int argc, char * argv []) {
int n = 16 * 1024 * 1024;
int numBytes = n * sizeof (float);
// allocate host memory
float *a = new float[n];
for (int i = 0; i < n; i++)
a [i] = 0.0f;
// allocate device memory
float *dev = NULL;
hipMalloc((void**) &dev, numBytes);
// set up the kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
hipEvent_t start, stop;
float gpuTime = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
// asynchronously issue the work to the GPU (all in stream 0)
hipEventRecord(start, 0);
hipMemcpy(dev, a, numBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( incKernel), dim3(blocks), dim3(threads), 0, 0, dev);
hipMemcpy(a, dev, numBytes, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuTime, start, stop);
// Print the CPU and GPU execution time
printf("time spent executing by the GPU: %.2f milliseconds\n", gpuTime);
// check the output for correctness
printf("--------------------------------------------------------------\n");
for (int i = 0; i < n; i++)
if (a [i] != 1.0f) {
printf ("Error in pos %d, %f\n", i, a[i]);
break;
}
// release resources
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(dev);
delete[] a;
return 0;
}
|
894ff096c358d8d5b8e929b1f4cb138ba0fd3c41.cu
|
#include <stdio.h>
__global__ void incKernel (float *data) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
data[idx] = data[idx] + 1.0f;
}
int main (int argc, char * argv []) {
int n = 16 * 1024 * 1024;
int numBytes = n * sizeof (float);
// allocate host memory
float *a = new float[n];
for (int i = 0; i < n; i++)
a [i] = 0.0f;
// allocate device memory
float *dev = NULL;
cudaMalloc((void**) &dev, numBytes);
// Set up the kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
cudaEvent_t start, stop;
float gpuTime = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// asynchronously issue the work to the GPU (all in stream 0)
cudaEventRecord(start, 0);
cudaMemcpy(dev, a, numBytes, cudaMemcpyHostToDevice);
incKernel<<<blocks, threads>>>(dev);
cudaMemcpy(a, dev, numBytes, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime, start, stop);
// Print the CPU and GPU execution time
printf("time spent executing by the GPU: %.2f milliseconds\n", gpuTime);
// check the output for correctness
printf("--------------------------------------------------------------\n");
for (int i = 0; i < n; i++)
if (a [i] != 1.0f) {
printf ("Error in pos %d, %f\n", i, a[i]);
break;
}
// release resources
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(dev);
delete[] a;
return 0;
}
|
80b089d466d851899f9d19b15fc0aee021c9e6e3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
__global__ void
magma_zparic_csr_kernel(
magma_int_t n,
magma_int_t nnz,
magma_index_t *Arowidx,
magma_index_t *Acolidx,
const magmaDoubleComplex * __restrict__ A_val,
magma_index_t *rowptr,
magma_index_t *colidx,
magmaDoubleComplex *val )
{
int i, j;
int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz;
magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex s, sp;
int il, iu, jl, ju;
if ( k < nnz ) {
i = Arowidx[k];
j = Acolidx[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A_val+k );
#else
s = A_val[k];
#endif
il = rowptr[i];
iu = rowptr[j];
while (il < rowptr[i+1] && iu < rowptr[j+1]) {
sp = zero;
jl = colidx[il];
ju = colidx[iu];
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else {
// we are going to modify this u entry
sp = val[il] * val[iu];
s -= sp;
il++;
iu++;
}
}
s += sp; // undo the last operation (it must be the last)
// modify entry
if (i == j) // diagonal
val[il-1] = MAGMA_Z_MAKE( sqrt( fabs( MAGMA_Z_REAL(s) )), 0.0 );
else //sub-diagonal
val[il-1] = s / val[iu-1];
}
}// kernel
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
For reference, see:
E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization",
SIAM Journal on Scientific Computing, 37, C169-C193 (2015).
This routine was used in the ISC 2015 paper:
E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete
Factorizations on GPUs",
ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015.
The input format of the initial guess matrix A is Magma_CSRCOO,
A_CSR is CSR or CSRCOO format.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A - initial guess (lower triangular)
@param[in,out]
A_CSR magma_z_matrix
input/output matrix containing the IC approximation
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zparic_csr(
magma_z_matrix A,
magma_z_matrix A_CSR,
magma_queue_t queue )
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_zparic_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
A.num_rows, A.nnz,
A.rowidx, A.col, A.val,
A_CSR.row, A_CSR.col, A_CSR.val );
return MAGMA_SUCCESS;
}
|
80b089d466d851899f9d19b15fc0aee021c9e6e3.cu
|
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
__global__ void
magma_zparic_csr_kernel(
magma_int_t n,
magma_int_t nnz,
magma_index_t *Arowidx,
magma_index_t *Acolidx,
const magmaDoubleComplex * __restrict__ A_val,
magma_index_t *rowptr,
magma_index_t *colidx,
magmaDoubleComplex *val )
{
int i, j;
int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz;
magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex s, sp;
int il, iu, jl, ju;
if ( k < nnz ) {
i = Arowidx[k];
j = Acolidx[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A_val+k );
#else
s = A_val[k];
#endif
il = rowptr[i];
iu = rowptr[j];
while (il < rowptr[i+1] && iu < rowptr[j+1]) {
sp = zero;
jl = colidx[il];
ju = colidx[iu];
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else {
// we are going to modify this u entry
sp = val[il] * val[iu];
s -= sp;
il++;
iu++;
}
}
s += sp; // undo the last operation (it must be the last)
// modify entry
if (i == j) // diagonal
val[il-1] = MAGMA_Z_MAKE( sqrt( fabs( MAGMA_Z_REAL(s) )), 0.0 );
else //sub-diagonal
val[il-1] = s / val[iu-1];
}
}// kernel
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
For reference, see:
E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization",
SIAM Journal on Scientific Computing, 37, C169-C193 (2015).
This routine was used in the ISC 2015 paper:
E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete
Factorizations on GPUs",
ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015.
The input format of the initial guess matrix A is Magma_CSRCOO,
A_CSR is CSR or CSRCOO format.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A - initial guess (lower triangular)
@param[in,out]
A_CSR magma_z_matrix
input/output matrix containing the IC approximation
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zparic_csr(
magma_z_matrix A,
magma_z_matrix A_CSR,
magma_queue_t queue )
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_zparic_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( A.num_rows, A.nnz,
A.rowidx, A.col, A.val,
A_CSR.row, A_CSR.col, A_CSR.val );
return MAGMA_SUCCESS;
}
|
8bf02b04355d0ecf5eaea7f524fe5c095288c9bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define XSIZE 12800
#define YSIZE 12800
#define MAXITER 255
#define PIXEL(i,j) ((i)+(j)*XSIZE)
// Same as PS1
typedef unsigned char uchar;
typedef struct {
float real;
float imag;
} complex_t;
// implement these
void calculate_cuda(float x_start, float ylower, float step);
__global__
void julia_kernel(int* pixel_device, float x_start, float ylower, float step);
// utilities
void output_bmp();
double walltime();
void calculate_serial();
float x_start=-2.01;
float x_end=1;
float yupper;
float ylower;
float ycenter=1e-6;
float step;
complex_t julia_num;
int pixel_host[XSIZE*YSIZE];
int pixel[XSIZE*YSIZE];
double walltime() {
static struct timeval t;
gettimeofday(&t, NULL);
return (t.tv_sec + 1e-6 * t.tv_usec);
}
int* pixel_device;
// Set up the cuda memory transfers, launch your kernel and extract the finished image
void calculate_cuda(float x_start, float ylower, float step){
//int* pixel_device;
hipMalloc(&pixel_device, XSIZE*YSIZE*sizeof(int));
size_t threads_per_block_dim = 32;
// Assuming that XSIZE and YSIZE are divisible by 32
dim3 gridBlock(XSIZE/threads_per_block_dim, YSIZE/threads_per_block_dim);
dim3 threadBlock(threads_per_block_dim, threads_per_block_dim);
hipLaunchKernelGGL(( julia_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, pixel_device, x_start, ylower, step);
hipMemcpy(pixel_host, pixel_device, XSIZE*YSIZE*sizeof(int), hipMemcpyDeviceToHost);
hipFree(pixel_device);
}
// Implement the kernel responsible for iterating a single pixel
__global__
void julia_kernel(int* pixel_device, float x_start, float ylower, float step){
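// Escape-time iteration z <- z*z + c, with c taken from the pixel coordinate; the pixel
// stores the number of iterations performed before |z|^2 reaches 4, capped at MAXITER.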
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
complex_t c, z, temp;
int iter = 0;
c.real = (x_start + step * i);
c.imag = (ylower + step * j);
z = c;
while(z.real*z.real + z.imag*z.imag < 4){
temp.real = z.real*z.real - z.imag*z.imag + c.real;
temp.imag = 2.0*z.real*z.imag + c.imag;
z = temp;
if(++iter==MAXITER) break;
}
pixel_device[PIXEL(i,j)] = iter;
}
int main(int argc, char **argv) {
if(argc==1) {
puts("Usage: JULIA\n");
puts("Input real and imaginary part. ex: ./julia 0.0 -0.8");
return 0;
}
julia_num.real = strtod(argv[1], NULL);
julia_num.imag = strtod(argv[2], NULL);
/* Calculate the range in the y - axis such that we preserve the aspect ratio */
step = (x_end - x_start)/XSIZE;
yupper = ycenter + (step * YSIZE)/2;
ylower = ycenter - (step * YSIZE)/2;
printf("Calculating with the serial implementation...\n");
double start_serial = walltime();
calculate_serial();
double end_serial = walltime();
printf("Computation complete. It took %7.3f ms\n\n\n", end_serial - start_serial);
printf("Checking GPU(s)\n");
int n_devices;
hipGetDeviceCount(&n_devices);
printf("Number of CUDA devices: %d\n", n_devices);
hipDeviceProp_t device_prop;
hipGetDeviceProperties(&device_prop, 0);
printf("CUDA device name 1: %s\n" , device_prop.name);
if((n_devices < 1) || (n_devices > 2)){
printf("You're either on more than 2 GPUs, or something is broken\n");
printf("Exiting");
exit(0);
}
printf("Calculating with CUDA...\n");
//hipMalloc(&pixel_device, XSIZE*YSIZE*sizeof(int));
double start_gpu = walltime();
calculate_cuda(x_start, ylower, step);
double end_gpu = walltime();
//hipMemcpy(pixel_host, pixel_device, XSIZE*YSIZE*sizeof(int), hipMemcpyDeviceToHost);
//hipFree(pixel_device);
printf("Computation complete. It took %7.10f ms\n", end_gpu - start_gpu);
//output_bmp();
return 0;
}
//////////////////////////////////////////
//////////////////////////////////////////
//////////////////////////////////////////
////// UTILITIES, ALREADY IMPLEMENTED
complex_t add_complex(complex_t a, complex_t b){
complex_t temp;
temp.real = a.real + b.real;
temp.imag = a.imag + b.imag;
return temp;
}
complex_t add_real(complex_t a, int b){
complex_t temp;
temp.real = a.real + b;
return temp;
}
complex_t square_complex(complex_t c){
complex_t temp;
temp.real = c.real*c.real - (c.imag*c.imag);
temp.imag = 2*c.imag*c.real;
return temp;
}
void savebmp(char *name,uchar *buffer,int x,int y);
void fancycolour(uchar *p,int iter);
void output_bmp(){
unsigned char* img_buffer = (unsigned char*)calloc(XSIZE*YSIZE*3, 1);
for(int ii = 0; ii < XSIZE; ii++){
for(int jj = 0; jj < YSIZE; jj++){
int p=((YSIZE-jj-1)*XSIZE+ii)*3;
fancycolour(img_buffer+p,pixel_host[PIXEL(ii,jj)]);
}
}
char filename[20] = "julia.bmp";
savebmp(filename, img_buffer, XSIZE, YSIZE);
free(img_buffer);
}
/* save 24-bits bmp file, buffer must be in bmp format: upside-down */
void savebmp(char *name,uchar *buffer,int x,int y) {
FILE *f=fopen(name,"wb");
if(!f) {
printf("Error writing image to disk.\n");
return;
}
unsigned int size=x*y*3+54;
uchar header[54]={'B','M',size&255,(size>>8)&255,(size>>16)&255,size>>24,0,
0,0,0,54,0,0,0,40,0,0,0,x&255,x>>8,0,0,y&255,y>>8,0,0,1,0,24,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
fwrite(header,1,54,f);
fwrite(buffer,1,XSIZE*YSIZE*3,f);
fclose(f);
}
/* given iteration number, set a colour */
void fancycolour(uchar *p,int iter) {
if(iter==MAXITER);
else if(iter<8) { p[0]=128+iter*16; p[1]=p[2]=0; }
else if(iter<24) { p[0]=255; p[1]=p[2]=(iter-8)*16; }
else if(iter<160) { p[0]=p[1]=255-(iter-24)*2; p[2]=255; }
else { p[0]=p[1]=(iter-160)*2; p[2]=255-(iter-160)*2; }
}
void calculate_serial() {
for(int i=0;i<XSIZE;i++) {
for(int j=0;j<YSIZE;j++) {
/* Calculate the number of iterations until divergence for each pixel.
If divergence never happens, return MAXITER */
complex_t c;
complex_t z;
complex_t temp;
int iter=0;
// find our starting complex number c
c.real = (x_start + step*i);
c.imag = (ylower + step*j);
// our starting z is c
z = c;
// iterate until we escape
while(z.real*z.real + z.imag*z.imag < 4) {
temp.real = (z.real*z.real) - (z.imag*z.imag);
temp.imag = 2*z.real*z.imag;
temp.real += julia_num.real;
temp.imag += julia_num.imag;
z = temp;
if(++iter==MAXITER) break;
}
pixel[PIXEL(i,j)]=iter;
}
}
}
|
8bf02b04355d0ecf5eaea7f524fe5c095288c9bd.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
#include <math.h>
#define XSIZE 12800
#define YSIZE 12800
#define MAXITER 255
#define PIXEL(i,j) ((i)+(j)*XSIZE)
// Same as PS1
typedef unsigned char uchar;
typedef struct {
float real;
float imag;
} complex_t;
// implement these
void calculate_cuda(float x_start, float ylower, float step);
__global__
void julia_kernel(int* pixel_device, float x_start, float ylower, float step);
// utilities
void output_bmp();
double walltime();
void calculate_serial();
float x_start=-2.01;
float x_end=1;
float yupper;
float ylower;
float ycenter=1e-6;
float step;
complex_t julia_num;
int pixel_host[XSIZE*YSIZE];
int pixel[XSIZE*YSIZE];
double walltime() {
static struct timeval t;
gettimeofday(&t, NULL);
return (t.tv_sec + 1e-6 * t.tv_usec);
}
int* pixel_device;
// Set up the cuda memory transfers, launch your kernel and extract the finished image
void calculate_cuda(float x_start, float ylower, float step){
//int* pixel_device;
cudaMalloc(&pixel_device, XSIZE*YSIZE*sizeof(int));
size_t threads_per_block_dim = 32;
// Assuming that XSIZE and YSIZE are divisible by 32
dim3 gridBlock(XSIZE/threads_per_block_dim, YSIZE/threads_per_block_dim);
dim3 threadBlock(threads_per_block_dim, threads_per_block_dim);
julia_kernel<<<gridBlock, threadBlock>>>(pixel_device, x_start, ylower, step);
cudaMemcpy(pixel_host, pixel_device, XSIZE*YSIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(pixel_device);
}
// Implement the kernel responsible for iterating a single pixel
__global__
void julia_kernel(int* pixel_device, float x_start, float ylower, float step){
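// Escape-time iteration z <- z*z + c, with c taken from the pixel coordinate; the pixel
// stores the number of iterations performed before |z|^2 reaches 4, capped at MAXITER.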
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
complex_t c, z, temp;
int iter = 0;
c.real = (x_start + step * i);
c.imag = (ylower + step * j);
z = c;
while(z.real*z.real + z.imag*z.imag < 4){
temp.real = z.real*z.real - z.imag*z.imag + c.real;
temp.imag = 2.0*z.real*z.imag + c.imag;
z = temp;
if(++iter==MAXITER) break;
}
pixel_device[PIXEL(i,j)] = iter;
}
int main(int argc, char **argv) {
if(argc==1) {
puts("Usage: JULIA\n");
puts("Input real and imaginary part. ex: ./julia 0.0 -0.8");
return 0;
}
julia_num.real = strtod(argv[1], NULL);
julia_num.imag = strtod(argv[2], NULL);
/* Calculate the range in the y - axis such that we preserve the aspect ratio */
step = (x_end - x_start)/XSIZE;
yupper = ycenter + (step * YSIZE)/2;
ylower = ycenter - (step * YSIZE)/2;
printf("Calculating with the serial implementation...\n");
double start_serial = walltime();
calculate_serial();
double end_serial = walltime();
printf("Computation complete. It took %7.3f ms\n\n\n", end_serial - start_serial);
printf("Checking GPU(s)\n");
int n_devices;
cudaGetDeviceCount(&n_devices);
printf("Number of CUDA devices: %d\n", n_devices);
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, 0);
printf("CUDA device name 1: %s\n" , device_prop.name);
if((n_devices < 1) || (n_devices > 2)){
printf("You're either on more than 2 GPUs, or something is broken\n");
printf("Exiting");
exit(0);
}
printf("Calculating with CUDA...\n");
//cudaMalloc(&pixel_device, XSIZE*YSIZE*sizeof(int));
double start_gpu = walltime();
calculate_cuda(x_start, ylower, step);
double end_gpu = walltime();
//cudaMemcpy(pixel_host, pixel_device, XSIZE*YSIZE*sizeof(int), cudaMemcpyDeviceToHost);
//cudaFree(pixel_device);
printf("Computation complete. It took %7.10f ms\n", end_gpu - start_gpu);
//output_bmp();
return 0;
}
//////////////////////////////////////////
//////////////////////////////////////////
//////////////////////////////////////////
////// UTILITIES, ALREADY IMPLEMENTED
complex_t add_complex(complex_t a, complex_t b){
complex_t temp;
temp.real = a.real + b.real;
temp.imag = a.imag + b.imag;
return temp;
}
complex_t add_real(complex_t a, int b){
complex_t temp;
temp.real = a.real + b;
return temp;
}
complex_t square_complex(complex_t c){
complex_t temp;
temp.real = c.real*c.real - (c.imag*c.imag);
temp.imag = 2*c.imag*c.real;
return temp;
}
void savebmp(char *name,uchar *buffer,int x,int y);
void fancycolour(uchar *p,int iter);
void output_bmp(){
unsigned char* img_buffer = (unsigned char*)calloc(XSIZE*YSIZE*3, 1);
for(int ii = 0; ii < XSIZE; ii++){
for(int jj = 0; jj < YSIZE; jj++){
int p=((YSIZE-jj-1)*XSIZE+ii)*3;
fancycolour(img_buffer+p,pixel_host[PIXEL(ii,jj)]);
}
}
char filename[20] = "julia.bmp";
savebmp(filename, img_buffer, XSIZE, YSIZE);
free(img_buffer);
}
/* save 24-bits bmp file, buffer must be in bmp format: upside-down */
void savebmp(char *name,uchar *buffer,int x,int y) {
FILE *f=fopen(name,"wb");
if(!f) {
printf("Error writing image to disk.\n");
return;
}
unsigned int size=x*y*3+54;
uchar header[54]={'B','M',size&255,(size>>8)&255,(size>>16)&255,size>>24,0,
0,0,0,54,0,0,0,40,0,0,0,x&255,x>>8,0,0,y&255,y>>8,0,0,1,0,24,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
fwrite(header,1,54,f);
fwrite(buffer,1,XSIZE*YSIZE*3,f);
fclose(f);
}
/* given iteration number, set a colour */
void fancycolour(uchar *p,int iter) {
if(iter==MAXITER);
else if(iter<8) { p[0]=128+iter*16; p[1]=p[2]=0; }
else if(iter<24) { p[0]=255; p[1]=p[2]=(iter-8)*16; }
else if(iter<160) { p[0]=p[1]=255-(iter-24)*2; p[2]=255; }
else { p[0]=p[1]=(iter-160)*2; p[2]=255-(iter-160)*2; }
}
void calculate_serial() {
for(int i=0;i<XSIZE;i++) {
for(int j=0;j<YSIZE;j++) {
/* Calculate the number of iterations until divergence for each pixel.
If divergence never happens, return MAXITER */
complex_t c;
complex_t z;
complex_t temp;
int iter=0;
// find our starting complex number c
c.real = (x_start + step*i);
c.imag = (ylower + step*j);
// our starting z is c
z = c;
// iterate until we escape
while(z.real*z.real + z.imag*z.imag < 4) {
temp.real = (z.real*z.real) - (z.imag*z.imag);
temp.imag = 2*z.real*z.imag;
temp.real += julia_num.real;
temp.imag += julia_num.imag;
z = temp;
if(++iter==MAXITER) break;
}
pixel[PIXEL(i,j)]=iter;
}
}
}
|
f3a79736eb1cae970c1f52391d7c4518e60f5b3d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/common_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
}
class FloatToHalfCUDA : public Operator<CUDAContext> {
public:
FloatToHalfCUDA(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
~FloatToHalfCUDA() {}
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
hipLaunchKernelGGL(( FloatToHalfKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
X.size(), X.data<float>(),
reinterpret_cast<half*>(Y->mutable_data<float16>()));
return true;
}
};
class HalfToFloatCUDA : public Operator<CUDAContext> {
public:
HalfToFloatCUDA(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
~HalfToFloatCUDA() {}
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
hipLaunchKernelGGL(( HalfToFloatKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
X.size(), reinterpret_cast<const half*>(X.data<float16>()),
Y->mutable_data<float>());
return true;
}
};
namespace {
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfCUDA);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatCUDA);
OPERATOR_SCHEMA(FloatToHalf).NumInputs(1).NumOutputs(1);
OPERATOR_SCHEMA(HalfToFloat).NumInputs(1).NumOutputs(1);
class GetFloatToHalfGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"HalfToFloat", "",
vector<string>{GO(0)},
vector<string>{GI(0)});
}
};
REGISTER_GRADIENT(FloatToHalf, GetFloatToHalfGradient);
class GetHalfToFloatGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"FloatToHalf", "",
vector<string>{GO(0)},
vector<string>{GI(0)});
}
};
REGISTER_GRADIENT(HalfToFloat, GetHalfToFloatGradient);
} // namespace
} // namespace caffe2
#endif // CAFFE_HAS_CUDA_FP16
|
f3a79736eb1cae970c1f52391d7c4518e60f5b3d.cu
|
#include "caffe2/core/common_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
}
class FloatToHalfCUDA : public Operator<CUDAContext> {
public:
FloatToHalfCUDA(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
~FloatToHalfCUDA() {}
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
FloatToHalfKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
X.size(), X.data<float>(),
reinterpret_cast<half*>(Y->mutable_data<float16>()));
return true;
}
};
class HalfToFloatCUDA : public Operator<CUDAContext> {
public:
HalfToFloatCUDA(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
~HalfToFloatCUDA() {}
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
HalfToFloatKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
X.size(), reinterpret_cast<const half*>(X.data<float16>()),
Y->mutable_data<float>());
return true;
}
};
namespace {
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfCUDA);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatCUDA);
OPERATOR_SCHEMA(FloatToHalf).NumInputs(1).NumOutputs(1);
OPERATOR_SCHEMA(HalfToFloat).NumInputs(1).NumOutputs(1);
class GetFloatToHalfGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"HalfToFloat", "",
vector<string>{GO(0)},
vector<string>{GI(0)});
}
};
REGISTER_GRADIENT(FloatToHalf, GetFloatToHalfGradient);
class GetHalfToFloatGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"FloatToHalf", "",
vector<string>{GO(0)},
vector<string>{GI(0)});
}
};
REGISTER_GRADIENT(HalfToFloat, GetHalfToFloatGradient);
} // namespace
} // namespace caffe2
#endif // CAFFE_HAS_CUDA_FP16
|
aa89140a4c3635964d803d08b593e0a4fbacf446.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <math.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess){
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define LOOP 2000
#define TOL 1e-6
#define ARR_LEN 2000
__global__ void kernel_sor(float *d_A, float *d_B) {
const int row_thread_x = ARR_LEN/blockDim.x;
const int row_thread_y = ARR_LEN/blockDim.y;
const int threadStart = blockIdx.x * row_thread_x;
const int threadEnd = (blockIdx.x+1)*row_thread_x -1;
const int width = threadStart - threadEnd;
int index,iter;
index = row_thread_x*width+row_thread_y;
// check boundaries
for(iter =1; iter <LOOP; iter++){
if((row_thread_x > 0) && (row_thread_y > 0) && (row_thread_x < ARR_LEN-1) && (row_thread_y < ARR_LEN-1))
d_B[index] = d_A[index-1]
+ d_A[index+1]
+ d_A[index+ARR_LEN]
+ d_A[index-ARR_LEN];
__syncthreads();
}
}
int main (int argc, char **argv) {
float A[ARR_LEN][ARR_LEN], B[ARR_LEN][ARR_LEN];
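// Note: A and B are each about 16 MB (2000 * 2000 * sizeof(float)) and live on the stack; this may exceed the default stack limit on some systems.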
float *d_A, *d_B;
float *h_B; // output of B from the GPU to CPU
int i, j,iter;
int num_bytes = ARR_LEN * ARR_LEN * sizeof(float);
int errCount = 0;
// Input is randomly generated
for(i=0;i<ARR_LEN;i++) {
for(j=0;j<ARR_LEN;j++) {
A[i][j] = (float) rand()/1234;
}
}
hipEvent_t start_event0, stop_event0;
float elapsed_time0;
CUDA_SAFE_CALL( hipEventCreate(&start_event0) );
CUDA_SAFE_CALL( hipEventCreate(&stop_event0) );
hipEventRecord(start_event0, 0);
// CPU computation
for(iter=1;iter<LOOP;iter++){
for(i=1;i<ARR_LEN-1;i++) {
for(j=1;j<ARR_LEN-1;j++) {
B[i][j] = A[i-1][j]+A[i+1][j]+A[i][j-1]+A[i][j+1];
}
}
}
hipEventRecord(stop_event0, 0);
hipEventSynchronize(stop_event0);
CUDA_SAFE_CALL( hipEventElapsedTime(&elapsed_time0,start_event0, stop_event0) );
h_B = (float *)malloc(num_bytes);
memset(h_B, 0, num_bytes);
//ALLOCATE MEMORY FOR GPU COPIES OF A AND B
hipMalloc((void**)&d_A, num_bytes);
hipMalloc((void**)&d_B, num_bytes);
hipMemset(d_A, 0, num_bytes);
hipMemset(d_B, 0, num_bytes);
//COPY A TO GPU
hipMemcpy(d_A, A, num_bytes, hipMemcpyHostToDevice);
// create CUDA event handles for timing purposes
hipEvent_t start_event, stop_event;
float elapsed_time;
CUDA_SAFE_CALL( hipEventCreate(&start_event) );
CUDA_SAFE_CALL( hipEventCreate(&stop_event) );
hipEventRecord(start_event, 0);
dim3 block_size(16,16); //values experimentally determined to be fastest
hipLaunchKernelGGL(( kernel_sor), dim3(1),dim3(block_size), 0, 0, d_A,d_B);
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
CUDA_SAFE_CALL( hipEventElapsedTime(&elapsed_time,start_event, stop_event) );
//COPY B BACK FROM GPU
hipMemcpy(h_B, d_B, num_bytes, hipMemcpyDeviceToHost);
// Compare host and device results element by element
for(iter = 0; iter < ARR_LEN; iter++){
for(i=0; i< ARR_LEN; i++){
int index = iter*ARR_LEN + i;
if(fabs(h_B[index] - B[iter][i]) > TOL){
errCount ++;
}
}
}
printf("Error Count:\t%d\n",errCount);
printf("CPU computation time: \t%.2f ms\n", elapsed_time0);
printf("GPU computation time: \t%.2f ms\n", elapsed_time);
printf("CUDA Speedup:\t%.2fx\n",(elapsed_time0/elapsed_time));
hipFree(d_A);
hipFree(d_B);
free(h_B);
}
|
aa89140a4c3635964d803d08b593e0a4fbacf446.cu
|
#include <cstdlib>
#include <cstdio>
#include <math.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess){
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define LOOP 2000
#define TOL 1e-6
#define ARR_LEN 2000
__global__ void kernel_sor(float *d_A, float *d_B) {
const int row_thread_x = ARR_LEN/blockDim.x;
const int row_thread_y = ARR_LEN/blockDim.y;
const int threadStart = blockIdx.x * row_thread_x;
const int threadEnd = (blockIdx.x+1)*row_thread_x -1;
const int width = threadStart - threadEnd;
int index,iter;
index = row_thread_x*width+row_thread_y;
// check boundaries
for(iter =1; iter <LOOP; iter++){
if((row_thread_x > 0) && (row_thread_y > 0) && (row_thread_x < ARR_LEN-1) && (row_thread_y < ARR_LEN-1))
d_B[index] = d_A[index-1]
+ d_A[index+1]
+ d_A[index+ARR_LEN]
+ d_A[index-ARR_LEN];
__syncthreads();
}
}
int main (int argc, char **argv) {
float A[ARR_LEN][ARR_LEN], B[ARR_LEN][ARR_LEN];
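// Note: A and B are each about 16 MB (2000 * 2000 * sizeof(float)) and live on the stack; this may exceed the default stack limit on some systems.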
float *d_A, *d_B;
float *h_B; // output of B from the GPU to CPU
int i, j,iter;
int num_bytes = ARR_LEN * ARR_LEN * sizeof(float);
int errCount = 0;
// Input is randomly generated
for(i=0;i<ARR_LEN;i++) {
for(j=0;j<ARR_LEN;j++) {
A[i][j] = (float) rand()/1234;
}
}
cudaEvent_t start_event0, stop_event0;
float elapsed_time0;
CUDA_SAFE_CALL( cudaEventCreate(&start_event0) );
CUDA_SAFE_CALL( cudaEventCreate(&stop_event0) );
cudaEventRecord(start_event0, 0);
// CPU computation
for(iter=1;iter<LOOP;iter++){
for(i=1;i<ARR_LEN-1;i++) {
for(j=1;j<ARR_LEN-1;j++) {
B[i][j] = A[i-1][j]+A[i+1][j]+A[i][j-1]+A[i][j+1];
}
}
}
cudaEventRecord(stop_event0, 0);
cudaEventSynchronize(stop_event0);
CUDA_SAFE_CALL( cudaEventElapsedTime(&elapsed_time0,start_event0, stop_event0) );
h_B = (float *)malloc(num_bytes);
memset(h_B, 0, num_bytes);
//ALLOCATE MEMORY FOR GPU COPIES OF A AND B
cudaMalloc((void**)&d_A, num_bytes);
cudaMalloc((void**)&d_B, num_bytes);
cudaMemset(d_A, 0, num_bytes);
cudaMemset(d_B, 0, num_bytes);
//COPY A TO GPU
cudaMemcpy(d_A, A, num_bytes, cudaMemcpyHostToDevice);
// create CUDA event handles for timing purposes
cudaEvent_t start_event, stop_event;
float elapsed_time;
CUDA_SAFE_CALL( cudaEventCreate(&start_event) );
CUDA_SAFE_CALL( cudaEventCreate(&stop_event) );
cudaEventRecord(start_event, 0);
dim3 block_size(16,16); //values experimentally determined to be fastest
kernel_sor<<<1,block_size>>>(d_A,d_B);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
CUDA_SAFE_CALL( cudaEventElapsedTime(&elapsed_time,start_event, stop_event) );
//COPY B BACK FROM GPU
cudaMemcpy(h_B, d_B, num_bytes, cudaMemcpyDeviceToHost);
// Compare host and device results element by element
for(iter = 0; iter < ARR_LEN; iter++){
for(i=0; i< ARR_LEN; i++){
int index = iter*ARR_LEN + i;
if(fabs(h_B[index] - B[iter][i]) > TOL){
errCount ++;
}
}
}
printf("Error Count:\t%d\n",errCount);
printf("CPU computation time: \t%.2f ms\n", elapsed_time0);
printf("GPU computation time: \t%.2f ms\n", elapsed_time);
printf("CUDA Speedup:\t%.2fx\n",(elapsed_time0/elapsed_time));
cudaFree(d_A);
cudaFree(d_B);
free(h_B);
}
|
e057aac9c1780df4798e2eb2da637271027ebc0e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "ATen/ATen.h"
#include "ATen/hip/HIPContext.h"
#include "ATen/hip/detail/IndexUtils.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cmath>
#include "ATen/TensorUtils.h"
#include "ATen/Type.h"
#include "ATen/AccumulateType.h"
#include <THH/THHGeneral.h>
#include "type_shim.h"
typedef enum{
ADAM_MODE_0 =0, // eps under square root
ADAM_MODE_1 =1 // eps outside square root
} adamMode_t;
template <typename T, typename GRAD_T>
__global__ void adam_cuda_kernel(
GRAD_T* __restrict__ p,
T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed
T* __restrict__ m,
T* __restrict__ v,
const GRAD_T * __restrict__ g,
const float b1,
const float b2,
const float eps,
const float grad_scale,
const float step_size,
const size_t tsize,
adamMode_t mode,
const float decay)
{
//Assuming 2D grids and 2D blocks
const int blockId = gridDim.x * blockIdx.y + blockIdx.x;
const int threadsPerBlock = blockDim.x * blockDim.y;
const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int i = (blockId * threadsPerBlock + threadIdInBlock);
const int totThreads = gridDim.x*gridDim.y*threadsPerBlock;
for (int j = i; j < tsize; j+=totThreads) {
T scaled_grad = g[j]/grad_scale;
m[j] = b1*m[j] + (1-b1)*scaled_grad;
v[j] = b2*v[j] + (1-b2)*scaled_grad*scaled_grad;
float denom;
if (mode == ADAM_MODE_0)
denom = sqrtf(v[j] + eps);
else // Mode 1
denom = sqrtf(v[j]) + eps;
float update = (m[j]/denom) + (decay*p[j]);
p[j] = (GRAD_T) (p[j] - (step_size*update));
if (p_copy != NULL) p_copy[j] = (GRAD_T) p[j];
}
}
void fused_adam_cuda(
at::Tensor & p,
at::Tensor & p_copy,
at::Tensor & m,
at::Tensor & v,
at::Tensor & g,
float lr,
float beta1,
float beta2,
float eps,
float grad_scale,
int step,
int mode,
int bias_correction,
float decay)
{
// using namespace at;
//Get tensor size
int tsize = p.numel();
//Determine #threads and #blocks
const int threadsPerBlock = 512;
const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock);
AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32");
//Constants
float step_size = 0;
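// When bias_correction == 1, step_size = lr * sqrt(1 - beta2^step) / (1 - beta1^step); otherwise step_size = lr.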
if (bias_correction == 1) {
const float bias_correction1 = 1 - ::pow(beta1, step);
const float bias_correction2 = 1 - ::pow(beta2, step);
step_size = lr * std::sqrt(bias_correction2)/bias_correction1;
}
else {
step_size = lr;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (g.scalar_type() == at::ScalarType::Half) {
//all other values should be fp32 for half gradients
// AT_ASSERTM(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type");
//dispatch is done on the gradient type
using namespace at; // prevents "toString is undefined" errors
DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( adam_cuda_kernel<accscalar_t, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream,
p.data<scalar_t_0>(),
NULL, //don't output p_copy for fp32, it's wasted write
m.data<accscalar_t>(),
v.data<accscalar_t>(),
g.data<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
(adamMode_t) mode,
decay);
)
} else {
using namespace at;
DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel",
hipLaunchKernelGGL(( adam_cuda_kernel<scalar_t_0, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream,
p.data<scalar_t_0>(),
NULL, //don't output p_copy for fp32, it's wasted write
m.data<scalar_t_0>(),
v.data<scalar_t_0>(),
g.data<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
(adamMode_t) mode,
decay);
);
}
THCudaCheck(hipGetLastError());
}
|
e057aac9c1780df4798e2eb2da637271027ebc0e.cu
|
#include "ATen/ATen.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/cuda/detail/IndexUtils.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cmath>
#include "ATen/TensorUtils.h"
#include "ATen/Type.h"
#include "ATen/AccumulateType.h"
#include <THC/THCGeneral.h>
#include "type_shim.h"
typedef enum{
ADAM_MODE_0 =0, // eps under square root
ADAM_MODE_1 =1 // eps outside square root
} adamMode_t;
template <typename T, typename GRAD_T>
__global__ void adam_cuda_kernel(
GRAD_T* __restrict__ p,
T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed
T* __restrict__ m,
T* __restrict__ v,
const GRAD_T * __restrict__ g,
const float b1,
const float b2,
const float eps,
const float grad_scale,
const float step_size,
const size_t tsize,
adamMode_t mode,
const float decay)
{
//Assuming 2D grids and 2D blocks
const int blockId = gridDim.x * blockIdx.y + blockIdx.x;
const int threadsPerBlock = blockDim.x * blockDim.y;
const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int i = (blockId * threadsPerBlock + threadIdInBlock);
const int totThreads = gridDim.x*gridDim.y*threadsPerBlock;
for (int j = i; j < tsize; j+=totThreads) {
T scaled_grad = g[j]/grad_scale;
m[j] = b1*m[j] + (1-b1)*scaled_grad;
v[j] = b2*v[j] + (1-b2)*scaled_grad*scaled_grad;
float denom;
if (mode == ADAM_MODE_0)
denom = sqrtf(v[j] + eps);
else // Mode 1
denom = sqrtf(v[j]) + eps;
float update = (m[j]/denom) + (decay*p[j]);
p[j] = (GRAD_T) (p[j] - (step_size*update));
if (p_copy != NULL) p_copy[j] = (GRAD_T) p[j];
}
}
void fused_adam_cuda(
at::Tensor & p,
at::Tensor & p_copy,
at::Tensor & m,
at::Tensor & v,
at::Tensor & g,
float lr,
float beta1,
float beta2,
float eps,
float grad_scale,
int step,
int mode,
int bias_correction,
float decay)
{
// using namespace at;
//Get tensor size
int tsize = p.numel();
//Determine #threads and #blocks
const int threadsPerBlock = 512;
const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock);
AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32");
//Constants
float step_size = 0;
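// When bias_correction == 1, step_size = lr * sqrt(1 - beta2^step) / (1 - beta1^step); otherwise step_size = lr.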
if (bias_correction == 1) {
const float bias_correction1 = 1 - std::pow(beta1, step);
const float bias_correction2 = 1 - std::pow(beta2, step);
step_size = lr * std::sqrt(bias_correction2)/bias_correction1;
}
else {
step_size = lr;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (g.scalar_type() == at::ScalarType::Half) {
//all other values should be fp32 for half gradients
// AT_ASSERTM(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type");
//dispatch is done on the gradient type
using namespace at; // prevents "toString is undefined" errors
DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
adam_cuda_kernel<accscalar_t, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>(
p.data<scalar_t_0>(),
NULL, //don't output p_copy for fp32, it's wasted write
m.data<accscalar_t>(),
v.data<accscalar_t>(),
g.data<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
(adamMode_t) mode,
decay);
)
} else {
using namespace at;
DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel",
adam_cuda_kernel<scalar_t_0, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>(
p.data<scalar_t_0>(),
NULL, //don't output p_copy for fp32, it's wasted write
m.data<scalar_t_0>(),
v.data<scalar_t_0>(),
g.data<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
(adamMode_t) mode,
decay);
);
}
THCudaCheck(cudaGetLastError());
}
|
aecf32c8c9cf26a7acda8c33350875553e9bd9e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuArraysCopyExtract_C2R_FixedOffset.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float2 *imageIn = NULL;
hipMalloc(&imageIn, XSIZE*YSIZE);
const int inNX = 1;
const int inNY = 1;
float *imageOut = NULL;
hipMalloc(&imageOut, XSIZE*YSIZE);
const int outNX = 1;
const int outNY = 1;
const int nImages = 1;
const int offsetX = 1;
const int offsetY = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cuArraysCopyExtract_C2R_FixedOffset), dim3(gridBlock),dim3(threadBlock), 0, 0, imageIn,inNX,inNY,imageOut,outNX,outNY,nImages,offsetX,offsetY);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cuArraysCopyExtract_C2R_FixedOffset), dim3(gridBlock),dim3(threadBlock), 0, 0, imageIn,inNX,inNY,imageOut,outNX,outNY,nImages,offsetX,offsetY);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cuArraysCopyExtract_C2R_FixedOffset), dim3(gridBlock),dim3(threadBlock), 0, 0, imageIn,inNX,inNY,imageOut,outNX,outNY,nImages,offsetX,offsetY);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
aecf32c8c9cf26a7acda8c33350875553e9bd9e5.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuArraysCopyExtract_C2R_FixedOffset.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float2 *imageIn = NULL;
cudaMalloc(&imageIn, XSIZE*YSIZE);
const int inNX = 1;
const int inNY = 1;
float *imageOut = NULL;
cudaMalloc(&imageOut, XSIZE*YSIZE);
const int outNX = 1;
const int outNY = 1;
const int nImages = 1;
const int offsetX = 1;
const int offsetY = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cuArraysCopyExtract_C2R_FixedOffset<<<gridBlock,threadBlock>>>(imageIn,inNX,inNY,imageOut,outNX,outNY,nImages,offsetX,offsetY);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuArraysCopyExtract_C2R_FixedOffset<<<gridBlock,threadBlock>>>(imageIn,inNX,inNY,imageOut,outNX,outNY,nImages,offsetX,offsetY);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuArraysCopyExtract_C2R_FixedOffset<<<gridBlock,threadBlock>>>(imageIn,inNX,inNY,imageOut,outNX,outNY,nImages,offsetX,offsetY);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
10d10e6bc34aeb5937e739ef08877b6c536a71e3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wb.h"
#include <bits/stdc++.h>
using namespace std;
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
#define Mask_width 5
#define Mask_radius (Mask_width / 2)
#define TILE_WIDTH 32
#define w (TILE_WIDTH + Mask_width - 1)
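// Note: w (tile width plus mask halo) is unused in this kernel; it would size the shared-memory tile in a tiled convolution variant.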
#define clamp(x) (min(max((x), 0.0), 1.0))
#define CEIL(a, b) ((a-1)/b +1)
const int num_channels = 3;
__global__ void convolution (float *deviceInputImageData, float* __restrict__ deviceMaskData,
float *deviceOutputImageData, int imageChannels, int imageWidth, int imageHeight) {
int out_x = blockDim.x * blockIdx.x + threadIdx.x;
int out_y = blockDim.y * blockIdx.y + threadIdx.y;
if(out_x >= imageWidth || out_y >= imageHeight)
return;
for (int c = 0; c < num_channels; ++c) { // channels
float acc = 0;
for (int off_y = -Mask_radius; off_y <= Mask_radius; ++off_y) {
for (int off_x = -Mask_radius; off_x <= Mask_radius; ++off_x) {
int in_y = out_y + off_y;
int in_x = out_x + off_x;
int mask_y = Mask_radius + off_y;
int mask_x = Mask_radius + off_x;
if (in_y < imageHeight && in_y >= 0 && in_x < imageWidth && in_x >= 0) {
acc += deviceInputImageData[(in_y * imageWidth + in_x) * num_channels + c] *
deviceMaskData[mask_y * Mask_width + mask_x];
}
}
}
deviceOutputImageData[(out_y * imageWidth + out_x) * num_channels + c] = clamp(acc);
}
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int maskRows = Mask_width;
int maskColumns = Mask_width;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == 5); /* mask height is fixed to 5 in this mp */
assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputImageData,
imageHeight * imageWidth * imageChannels * sizeof(float));
hipMalloc((void **)&deviceOutputImageData,
imageHeight * imageWidth * imageChannels * sizeof(float));
hipMalloc((void **)&deviceMaskData,
Mask_width * Mask_width * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputImageData, hostInputImageData,
imageHeight * imageWidth * imageChannels * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceOutputImageData, hostOutputImageData,
imageHeight * imageWidth * imageChannels * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData, hostMaskData,
Mask_width * Mask_width * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 grid(CEIL(imageWidth, TILE_WIDTH), CEIL(imageHeight, TILE_WIDTH), 1);
dim3 block(TILE_WIDTH, TILE_WIDTH, 1);
hipLaunchKernelGGL(( convolution) , dim3(grid), dim3(block), 0, 0, deviceInputImageData, deviceMaskData,
deviceOutputImageData, imageChannels, imageWidth, imageHeight);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//@@ Insert code here
free(hostInputImageData);
free(hostOutputImageData);
free(hostMaskData);
hipFree(deviceMaskData);
hipFree(deviceOutputImageData);
hipFree(deviceInputImageData);
}
|
10d10e6bc34aeb5937e739ef08877b6c536a71e3.cu
|
#include "wb.h"
#include <bits/stdc++.h>
using namespace std;
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
#define Mask_width 5
#define Mask_radius (Mask_width / 2)
#define TILE_WIDTH 32
#define w (TILE_WIDTH + Mask_width - 1)
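// Note: w (tile width plus mask halo) is unused in this kernel; it would size the shared-memory tile in a tiled convolution variant.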
#define clamp(x) (min(max((x), 0.0), 1.0))
#define CEIL(a, b) ((a-1)/b +1)
const int num_channels = 3;
__global__ void convolution (float *deviceInputImageData, float* __restrict__ deviceMaskData,
float *deviceOutputImageData, int imageChannels, int imageWidth, int imageHeight) {
int out_x = blockDim.x * blockIdx.x + threadIdx.x;
int out_y = blockDim.y * blockIdx.y + threadIdx.y;
if(out_x >= imageWidth || out_y >= imageHeight)
return;
for (int c = 0; c < num_channels; ++c) { // channels
float acc = 0;
for (int off_y = -Mask_radius; off_y <= Mask_radius; ++off_y) {
for (int off_x = -Mask_radius; off_x <= Mask_radius; ++off_x) {
int in_y = out_y + off_y;
int in_x = out_x + off_x;
int mask_y = Mask_radius + off_y;
int mask_x = Mask_radius + off_x;
if (in_y < imageHeight && in_y >= 0 && in_x < imageWidth && in_x >= 0) {
acc += deviceInputImageData[(in_y * imageWidth + in_x) * num_channels + c] *
deviceMaskData[mask_y * Mask_width + mask_x];
}
}
}
deviceOutputImageData[(out_y * imageWidth + out_x) * num_channels + c] = clamp(acc);
}
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int maskRows = Mask_width;
int maskColumns = Mask_width;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == 5); /* mask height is fixed to 5 in this mp */
assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputImageData,
imageHeight * imageWidth * imageChannels * sizeof(float));
cudaMalloc((void **)&deviceOutputImageData,
imageHeight * imageWidth * imageChannels * sizeof(float));
cudaMalloc((void **)&deviceMaskData,
Mask_width * Mask_width * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputImageData, hostInputImageData,
imageHeight * imageWidth * imageChannels * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceOutputImageData, hostOutputImageData,
imageHeight * imageWidth * imageChannels * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData, hostMaskData,
Mask_width * Mask_width * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 grid(CEIL(imageWidth, TILE_WIDTH), CEIL(imageHeight, TILE_WIDTH), 1);
dim3 block(TILE_WIDTH, TILE_WIDTH, 1);
convolution <<<grid, block>>> (deviceInputImageData, deviceMaskData,
deviceOutputImageData, imageChannels, imageWidth, imageHeight);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//@@ Insert code here
free(hostInputImageData);
free(hostOutputImageData);
free(hostMaskData);
cudaFree(deviceMaskData);
cudaFree(deviceOutputImageData);
cudaFree(deviceInputImageData);
}
|
86ed2c192e54dfccfdbe1f32a3019ebc75b73c84.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd3(double *a, double *b, double *c,double *d,int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
d[id] = a[id] + b[id]+c[id];
}
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd4(double *a, double *b, double *c,double *d,double *e,int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
e[id] = a[id] + b[id]+c[id]+d[id];
}
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd5(double *a, double *b, double *c,double *d,double *e,double *f,int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
f[id] = a[id] + b[id]+c[id]+d[id]+e[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
int tensor_num=0;
int tensor_size=0;
clock_t start1,start2, end2,end1;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
double *h_d;
double *h_e;
double *h_f;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
double *d_d;
double *d_e;
double *d_f;
{
tensor_num=atoi(argv[1]);
tensor_size=atoi(argv[2]);
n=tensor_size;
//printf("Tensor num: %d, tensor_size: %d\n",tensor_num,tensor_size);
}
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
h_d = (double*)malloc(bytes);
h_e = (double*)malloc(bytes);
h_f = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
hipMalloc(&d_d, bytes);
hipMalloc(&d_e, bytes);
hipMalloc(&d_f, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
start1=clock();
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
if(tensor_num>=3)
hipMemcpy( d_c, h_c, bytes, hipMemcpyHostToDevice);
if(tensor_num>=4)
hipMemcpy( d_d, h_d, bytes, hipMemcpyHostToDevice);
if(tensor_num>=5)
hipMemcpy( d_e, h_e, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
hipDeviceSynchronize();
start2=clock();
// Execute the kernel
if(tensor_num==2)
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
if(tensor_num==3)
hipLaunchKernelGGL(( vecAdd3), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c,d_d, n);
if(tensor_num==4)
hipLaunchKernelGGL(( vecAdd4), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c,d_d,d_e, n);
if(tensor_num==5)
hipLaunchKernelGGL(( vecAdd5), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c,d_d,d_e,d_f, n);
hipDeviceSynchronize();
end2=clock();
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
end1=clock();
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
//for(i=0; i<n; i++)
// sum += h_c[i];
//printf("final result: %f\n", sum/n);
float time1 = (float)(end1 - start1) / CLOCKS_PER_SEC;
float time2 = (float)(end2 - start2) / CLOCKS_PER_SEC;
printf("[%d, %d]: %f ms, computing: %f ms\n", tensor_num,tensor_size, time1*1000, time2*1000);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_d);
hipFree(d_e);
hipFree(d_f);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
free(h_d);
free(h_e);
free(h_f);
return 0;
}
|
86ed2c192e54dfccfdbe1f32a3019ebc75b73c84.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd3(double *a, double *b, double *c,double *d,int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
d[id] = a[id] + b[id]+c[id];
}
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd4(double *a, double *b, double *c,double *d,double *e,int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
e[id] = a[id] + b[id]+c[id]+d[id];
}
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd5(double *a, double *b, double *c,double *d,double *e,double *f,int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
f[id] = a[id] + b[id]+c[id]+d[id]+e[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
int tensor_num=0;
int tensor_size=0;
clock_t start1,start2, end2,end1;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
double *h_d;
double *h_e;
double *h_f;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
double *d_d;
double *d_e;
double *d_f;
{
tensor_num=atoi(argv[1]);
tensor_size=atoi(argv[2]);
n=tensor_size;
//printf("Tensor num: %d, tensor_size: %d\n",tensor_num,tensor_size);
}
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
h_d = (double*)malloc(bytes);
h_e = (double*)malloc(bytes);
h_f = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
cudaMalloc(&d_d, bytes);
cudaMalloc(&d_e, bytes);
cudaMalloc(&d_f, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
start1=clock();
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
if(tensor_num>=3)
cudaMemcpy( d_c, h_c, bytes, cudaMemcpyHostToDevice);
if(tensor_num>=4)
cudaMemcpy( d_d, h_d, bytes, cudaMemcpyHostToDevice);
if(tensor_num>=5)
cudaMemcpy( d_e, h_e, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
cudaDeviceSynchronize();
start2=clock();
// Execute the kernel
if(tensor_num==2)
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
if(tensor_num==3)
vecAdd3<<<gridSize, blockSize>>>(d_a, d_b, d_c,d_d, n);
if(tensor_num==4)
vecAdd4<<<gridSize, blockSize>>>(d_a, d_b, d_c,d_d,d_e, n);
if(tensor_num==5)
vecAdd5<<<gridSize, blockSize>>>(d_a, d_b, d_c,d_d,d_e,d_f, n);
cudaDeviceSynchronize();
end2=clock();
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
end1=clock();
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
//for(i=0; i<n; i++)
// sum += h_c[i];
//printf("final result: %f\n", sum/n);
float time1 = (float)(end1 - start1) / CLOCKS_PER_SEC;
float time2 = (float)(end2 - start2) / CLOCKS_PER_SEC;
printf("[%d, %d]: %f ms, computing: %f ms\n", tensor_num,tensor_size, time1*1000, time2*1000);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_d);
cudaFree(d_e);
cudaFree(d_f);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
free(h_d);
free(h_e);
free(h_f);
return 0;
}
|
fa8745201e39ac7c5db1ed9b8797b1cb49ba2c75.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <math.h>
#include <torch/extension.h>
#include <cstdio>
#include <sstream>
#include <tuple>
#include "rasterize_points/bitmask.cuh"
#include "rasterize_points/rasterization_utils.cuh"
namespace {
// A little structure for holding details about a pixel.
struct Pix {
float z; // Depth of the reference point.
int32_t idx; // Index of the reference point.
float dist2; // Euclidean distance square to the reference point.
};
__device__ inline bool operator<(const Pix& a, const Pix& b) {
return a.z < b.z;
}
// This function checks if a pixel given by xy location pxy lies within the
// point with index p and batch index n. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the points which intersect
// with this pixel sorted by closest z distance. If the pixel pxy lies in the
// point, the list (q) is updated and re-ordered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizePointsNaiveCudaKernel and
// RasterizePointsFineCudaKernel.
template <typename PointQ>
__device__ void CheckPixelInsidePoint(
const float* points, // (P, 3)
const int p_idx,
int& q_size,
float& q_max_z,
int& q_max_idx,
PointQ& q,
const float radius2,
const float xf,
const float yf,
const int K) {
const float px = points[p_idx * 3 + 0];
const float py = points[p_idx * 3 + 1];
const float pz = points[p_idx * 3 + 2];
if (pz < 0)
return; // Don't render points behind the camera
const float dx = xf - px;
const float dy = yf - py;
const float dist2 = dx * dx + dy * dy;
if (dist2 < radius2) {
if (q_size < K) {
// Just insert it
q[q_size] = {pz, p_idx, dist2};
if (pz > q_max_z) {
q_max_z = pz;
q_max_idx = q_size;
}
q_size++;
} else if (pz < q_max_z) {
// Overwrite the old max, and find the new max
q[q_max_idx] = {pz, p_idx, dist2};
q_max_z = pz;
for (int i = 0; i < K; i++) {
if (q[i].z > q_max_z) {
q_max_z = q[i].z;
q_max_idx = i;
}
}
}
}
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsNaiveCudaKernel(
const float* points, // (P, 3)
const int64_t* cloud_to_packed_first_idx, // (N)
const int64_t* num_points_per_cloud, // (N)
const float radius,
const int N,
const int S,
const int K,
int32_t* point_idxs, // (N, S, S, K)
float* zbuf, // (N, S, S, K)
float* pix_dists) { // (N, S, S, K)
// Simple version: One thread per output pixel
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const float radius2 = radius * radius;
for (int i = tid; i < N * S * S; i += num_threads) {
// Convert linear index to 3D index
const int n = i / (S * S); // Batch index
const int pix_idx = i % (S * S);
// Reverse ordering of X and Y axes.
const int yi = S - 1 - pix_idx / S;
const int xi = S - 1 - pix_idx % S;
const float xf = PixToNdc(xi, S);
const float yf = PixToNdc(yi, S);
// For keeping track of the K closest points we want a data structure
// that (1) gives O(1) access to the closest point for easy comparisons,
// and (2) allows insertion of new elements. In the CPU version we use
// std::priority_queue; then (2) is O(log K). We can't use STL
// containers in CUDA; we could roll our own max heap in an array, but
// that would likely have a lot of warp divergence so we do something
// simpler instead: keep the elements in an unsorted array, but keep
// track of the max value and the index of the max value. Then (1) is
// still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
// this should be fast enough for our purposes.
// TODO(jcjohns) Abstract this out into a standalone data structure
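// Example with K = 3: incoming depths 5, 2, 9 fill q and 9 becomes the tracked max;
// a later depth 1 overwrites the 9, and a rescan of q makes 5 the new max.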
Pix q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
// Using the batch index of the thread get the start and stop
// indices for the points.
const int64_t point_start_idx = cloud_to_packed_first_idx[n];
const int64_t point_stop_idx = point_start_idx + num_points_per_cloud[n];
for (int p_idx = point_start_idx; p_idx < point_stop_idx; ++p_idx) {
CheckPixelInsidePoint(
points, p_idx, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, K);
}
BubbleSort(q, q_size);
int idx = n * S * S * K + pix_idx * K;
for (int k = 0; k < q_size; ++k) {
point_idxs[idx + k] = q[k].idx;
zbuf[idx + k] = q[k].z;
pix_dists[idx + k] = q[k].dist2;
}
}
}
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
RasterizePointsNaiveCuda(
const torch::Tensor& points, // (P, 3)
const torch::Tensor& cloud_to_packed_first_idx, // (N)
const torch::Tensor& num_points_per_cloud, // (N)
const int image_size,
const float radius,
const int points_per_pixel) {
if (points.ndimension() != 2 || points.size(1) != 3) {
AT_ERROR("points must have dimensions (num_points, 3)");
}
if (num_points_per_cloud.size(0) != cloud_to_packed_first_idx.size(0)) {
AT_ERROR(
"num_points_per_cloud must have same size first dimension as cloud_to_packed_first_idx");
}
const int N = num_points_per_cloud.size(0); // batch size.
const int S = image_size;
const int K = points_per_pixel;
if (K > kMaxPointsPerPixel) {
std::stringstream ss;
ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
AT_ERROR(ss.str());
}
auto int_opts = points.options().dtype(torch::kInt32);
auto float_opts = points.options().dtype(torch::kFloat32);
torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizePointsNaiveCudaKernel), dim3(blocks), dim3(threads), 0, 0,
points.contiguous().data<float>(),
cloud_to_packed_first_idx.contiguous().data<int64_t>(),
num_points_per_cloud.contiguous().data<int64_t>(),
radius,
N,
S,
K,
point_idxs.contiguous().data<int32_t>(),
zbuf.contiguous().data<float>(),
pix_dists.contiguous().data<float>());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
// ****************************************************************************
// * COARSE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsCoarseCudaKernel(
const float* points, // (P, 3)
const int64_t* cloud_to_packed_first_idx, // (N)
const int64_t* num_points_per_cloud, // (N)
const float radius,
const int N,
const int P,
const int S,
const int bin_size,
const int chunk_size,
const int max_points_per_bin,
int* points_per_bin,
int* bin_points) {
extern __shared__ char sbuf[];
const int M = max_points_per_bin;
const int num_bins = 1 + (S - 1) / bin_size; // Integer divide round up
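// e.g. S = 64 with bin_size = 8 gives num_bins = 8, while S = 65 gives num_bins = 9.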
const float half_pix = 1.0f / S; // Size of half a pixel in NDC units
// This is a boolean array of shape (num_bins, num_bins, chunk_size)
// stored in shared memory that will track whether each point in the chunk
// falls into each bin of the image.
BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size);
// Have each block handle a chunk of points and build a 3D bitmask in
// shared memory to mark which points hit which bins. In this first phase,
// each thread processes one point at a time. After processing the chunk,
// one thread is assigned per bin, and the thread counts and writes the
// points for the bin out to global memory.
const int chunks_per_batch = 1 + (P - 1) / chunk_size;
const int num_chunks = N * chunks_per_batch;
for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
const int batch_idx = chunk / chunks_per_batch;
const int chunk_idx = chunk % chunks_per_batch;
const int point_start_idx = chunk_idx * chunk_size;
binmask.block_clear();
// Using the batch index of the thread get the start and stop
// indices for the points.
const int64_t cloud_point_start_idx = cloud_to_packed_first_idx[batch_idx];
const int64_t cloud_point_stop_idx =
cloud_point_start_idx + num_points_per_cloud[batch_idx];
// Have each thread handle a different point within the chunk
for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) {
const int p_idx = point_start_idx + p;
// Check if point index corresponds to the cloud in the batch given by
// batch_idx.
if (p_idx >= cloud_point_stop_idx || p_idx < cloud_point_start_idx) {
continue;
}
const float px = points[p_idx * 3 + 0];
const float py = points[p_idx * 3 + 1];
const float pz = points[p_idx * 3 + 2];
if (pz < 0)
continue; // Don't render points behind the camera.
const float px0 = px - radius;
const float px1 = px + radius;
const float py0 = py - radius;
const float py1 = py + radius;
// Brute-force search over all bins; TODO something smarter?
// For example we could compute the exact bin where the point falls,
// then check neighboring bins. This way we wouldn't have to check
// all bins (however then we might have more warp divergence?)
for (int by = 0; by < num_bins; ++by) {
// Get y extent for the bin. PixToNdc gives us the location of
// the center of each pixel, so we need to add/subtract a half
// pixel to get the true extent of the bin.
// Reverse ordering of Y axis so that +Y is upwards in the image.
const int yidx = num_bins - by;
const float bin_y_max = PixToNdc(yidx * bin_size - 1, S) + half_pix;
const float bin_y_min = PixToNdc((yidx - 1) * bin_size, S) - half_pix;
const bool y_overlap = (py0 <= bin_y_max) && (bin_y_min <= py1);
if (!y_overlap) {
continue;
}
for (int bx = 0; bx < num_bins; ++bx) {
// Get x extent for the bin; again we need to adjust the
// output of PixToNdc by half a pixel.
// Reverse ordering of x axis so that +X is left.
const int xidx = num_bins - bx;
const float bin_x_max = PixToNdc(xidx * bin_size - 1, S) + half_pix;
const float bin_x_min = PixToNdc((xidx - 1) * bin_size, S) - half_pix;
const bool x_overlap = (px0 <= bin_x_max) && (bin_x_min <= px1);
if (x_overlap) {
binmask.set(by, bx, p);
}
}
}
}
__syncthreads();
// Now we have processed every point in the current chunk. We need to
// count the number of points in each bin so we can write the indices
// out to global memory. We have each thread handle a different bin.
for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) {
const int by = byx / num_bins;
const int bx = byx % num_bins;
const int count = binmask.count(by, bx);
const int points_per_bin_idx =
batch_idx * num_bins * num_bins + by * num_bins + bx;
// This atomically increments the (global) number of points found
// in the current bin, and gets the previous value of the counter;
// this effectively allocates space in the bin_points array for the
// points in the current chunk that fall into this bin.
const int start = atomicAdd(points_per_bin + points_per_bin_idx, count);
// Now loop over the binmask and write the active bits for this bin
// out to bin_points.
int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M +
bx * M + start;
for (int p = 0; p < chunk_size; ++p) {
if (binmask.get(by, bx, p)) {
// TODO: Throw an error if next_idx >= M -- this means that
// we got more than max_points_per_bin in this bin
// TODO: check if atomicAdd is needed in line 265.
bin_points[next_idx] = point_start_idx + p;
next_idx++;
}
}
}
__syncthreads();
}
}
torch::Tensor RasterizePointsCoarseCuda(
const torch::Tensor& points, // (P, 3)
const torch::Tensor& cloud_to_packed_first_idx, // (N)
const torch::Tensor& num_points_per_cloud, // (N)
const int image_size,
const float radius,
const int bin_size,
const int max_points_per_bin) {
const int P = points.size(0);
const int N = num_points_per_cloud.size(0);
const int num_bins = 1 + (image_size - 1) / bin_size; // divide round up
const int M = max_points_per_bin;
if (points.ndimension() != 2 || points.size(1) != 3) {
AT_ERROR("points must have dimensions (num_points, 3)");
}
if (num_bins >= 22) {
// Make sure we do not use too much shared memory.
std::stringstream ss;
ss << "Got " << num_bins << "; that's too many!";
AT_ERROR(ss.str());
}
auto opts = points.options().dtype(torch::kInt32);
torch::Tensor points_per_bin = torch::zeros({N, num_bins, num_bins}, opts);
torch::Tensor bin_points = torch::full({N, num_bins, num_bins, M}, -1, opts);
const int chunk_size = 512;
const size_t shared_size = num_bins * num_bins * chunk_size / 8;
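// The bitmask stores one bit per (bin_y, bin_x, point-in-chunk) entry, hence num_bins * num_bins * chunk_size / 8 bytes of shared memory.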
const size_t blocks = 64;
const size_t threads = 512;
hipLaunchKernelGGL(( RasterizePointsCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, 0,
points.contiguous().data<float>(),
cloud_to_packed_first_idx.contiguous().data<int64_t>(),
num_points_per_cloud.contiguous().data<int64_t>(),
radius,
N,
P,
image_size,
bin_size,
chunk_size,
M,
points_per_bin.contiguous().data<int32_t>(),
bin_points.contiguous().data<int32_t>());
return bin_points;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsFineCudaKernel(
const float* points, // (P, 3)
const int32_t* bin_points, // (N, B, B, T)
const float radius,
const int bin_size,
const int N,
const int B,
const int M,
const int S,
const int K,
int32_t* point_idxs, // (N, S, S, K)
float* zbuf, // (N, S, S, K)
float* pix_dists) { // (N, S, S, K)
// This can be more than S^2 if S is not divisible by bin_size.
const int num_pixels = N * B * B * bin_size * bin_size;
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const float radius2 = radius * radius;
for (int pid = tid; pid < num_pixels; pid += num_threads) {
// Convert linear index into bin and pixel indices. We make the within
// block pixel ids move the fastest, so that adjacent threads will fall
// into the same bin; this should give them coalesced memory reads when
// they read from points and bin_points.
int i = pid;
const int n = i / (B * B * bin_size * bin_size);
i %= B * B * bin_size * bin_size;
const int by = i / (B * bin_size * bin_size);
i %= B * bin_size * bin_size;
const int bx = i / (bin_size * bin_size);
i %= bin_size * bin_size;
const int yi = i / bin_size + by * bin_size;
const int xi = i % bin_size + bx * bin_size;
if (yi >= S || xi >= S)
continue;
// Reverse ordering of the X and Y axis so that
// in the image +Y is pointing up and +X is pointing left.
const int yidx = S - 1 - yi;
const int xidx = S - 1 - xi;
const float xf = PixToNdc(xidx, S);
const float yf = PixToNdc(yidx, S);
// This part looks like the naive rasterization kernel, except we use
// bin_points to only look at a subset of points already known to fall
// in this bin. TODO abstract out this logic into some data structure
// that is shared by both kernels?
Pix q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
for (int m = 0; m < M; ++m) {
const int p = bin_points[n * B * B * M + by * B * M + bx * M + m];
if (p < 0) {
// bin_points uses -1 as a sentinel value
continue;
}
CheckPixelInsidePoint(
points, p, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, K);
}
// Now we've looked at all the points for this bin, so we can write
// output for the current pixel.
BubbleSort(q, q_size);
const int pix_idx = n * S * S * K + yi * S * K + xi * K;
for (int k = 0; k < q_size; ++k) {
point_idxs[pix_idx + k] = q[k].idx;
zbuf[pix_idx + k] = q[k].z;
pix_dists[pix_idx + k] = q[k].dist2;
}
}
}
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFineCuda(
const torch::Tensor& points, // (P, 3)
const torch::Tensor& bin_points,
const int image_size,
const float radius,
const int bin_size,
const int points_per_pixel) {
const int N = bin_points.size(0);
const int B = bin_points.size(1);
const int M = bin_points.size(3);
const int S = image_size;
const int K = points_per_pixel;
if (K > kMaxPointsPerPixel) {
AT_ERROR("Must have num_closest <= 8");
}
auto int_opts = points.options().dtype(torch::kInt32);
auto float_opts = points.options().dtype(torch::kFloat32);
torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizePointsFineCudaKernel), dim3(blocks), dim3(threads), 0, 0,
points.contiguous().data<float>(),
bin_points.contiguous().data<int32_t>(),
radius,
bin_size,
N,
B,
M,
S,
K,
point_idxs.contiguous().data<int32_t>(),
zbuf.contiguous().data<float>(),
pix_dists.contiguous().data<float>());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO(T55115174) Add more documentation for backward kernel.
__global__ void RasterizePointsBackwardCudaKernel(
const float* points, // (P, 3)
const int32_t* idxs, // (N, H, W, K)
const int N,
const int P,
const int H,
const int W,
const int K,
const float* grad_zbuf, // (N, H, W, K)
const float* grad_dists, // (N, H, W, K)
float* grad_points) { // (P, 3)
// Parallelized over each of K points per pixel, for each pixel in images of
// size H * W, for each image in the batch of size N.
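  // Gradient notes: pix_dists stores dist2 = (px - xf)^2 + (py - yf)^2 and zbuf
  // stores pz, so d(dist2)/d(px) = 2*(px - xf), d(dist2)/d(py) = 2*(py - yf),
  // and d(zbuf)/d(pz) = 1. Each (pixel, k) slot contributes independently, so
  // its gradient is accumulated into the packed points tensor with atomicAdd.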
int num_threads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < N * H * W * K; i += num_threads) {
// const int n = i / (H * W * K); // batch index (not needed).
const int yxk = i % (H * W * K);
const int yi = yxk / (W * K);
const int xk = yxk % (W * K);
const int xi = xk / K;
// k = xk % K (We don't actually need k, but this would be it.)
// Reverse ordering of X and Y axes.
const int yidx = H - 1 - yi;
const int xidx = W - 1 - xi;
const float xf = PixToNdc(xidx, W);
const float yf = PixToNdc(yidx, H);
const int p = idxs[i];
if (p < 0)
continue;
const float grad_dist2 = grad_dists[i];
const int p_ind = p * 3; // index into packed points tensor
const float px = points[p_ind + 0];
const float py = points[p_ind + 1];
const float dx = px - xf;
const float dy = py - yf;
const float grad_px = 2.0f * grad_dist2 * dx;
const float grad_py = 2.0f * grad_dist2 * dy;
const float grad_pz = grad_zbuf[i];
atomicAdd(grad_points + p_ind + 0, grad_px);
atomicAdd(grad_points + p_ind + 1, grad_py);
atomicAdd(grad_points + p_ind + 2, grad_pz);
}
}
torch::Tensor RasterizePointsBackwardCuda(
    const torch::Tensor& points, // (P, 3)
const torch::Tensor& idxs, // (N, H, W, K)
const torch::Tensor& grad_zbuf, // (N, H, W, K)
const torch::Tensor& grad_dists) { // (N, H, W, K)
const int P = points.size(0);
const int N = idxs.size(0);
const int H = idxs.size(1);
const int W = idxs.size(2);
const int K = idxs.size(3);
torch::Tensor grad_points = torch::zeros({P, 3}, points.options());
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizePointsBackwardCudaKernel), dim3(blocks), dim3(threads), 0, 0,
points.contiguous().data<float>(),
idxs.contiguous().data<int32_t>(),
N,
P,
H,
W,
K,
grad_zbuf.contiguous().data<float>(),
grad_dists.contiguous().data<float>(),
grad_points.contiguous().data<float>());
return grad_points;
}
|
fa8745201e39ac7c5db1ed9b8797b1cb49ba2c75.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <math.h>
#include <torch/extension.h>
#include <cstdio>
#include <sstream>
#include <tuple>
#include "rasterize_points/bitmask.cuh"
#include "rasterize_points/rasterization_utils.cuh"
namespace {
// A little structure for holding details about a pixel.
struct Pix {
float z; // Depth of the reference point.
int32_t idx; // Index of the reference point.
  float dist2; // Squared Euclidean distance to the reference point.
};
__device__ inline bool operator<(const Pix& a, const Pix& b) {
return a.z < b.z;
}
// This function checks whether the pixel at NDC location (xf, yf) lies within
// the point with index p_idx. One of the inputs is a list (q) of Pix structs
// holding the indices, depths and squared distances of the points that
// intersect this pixel. If the pixel lies inside the point, the list (q) is
// updated in place, and the auxiliary variables q_size, q_max_z and
// q_max_idx are updated as well.
// This code is shared between RasterizePointsNaiveCudaKernel and
// RasterizePointsFineCudaKernel.
template <typename PointQ>
__device__ void CheckPixelInsidePoint(
const float* points, // (P, 3)
const int p_idx,
int& q_size,
float& q_max_z,
int& q_max_idx,
PointQ& q,
const float radius2,
const float xf,
const float yf,
const int K) {
const float px = points[p_idx * 3 + 0];
const float py = points[p_idx * 3 + 1];
const float pz = points[p_idx * 3 + 2];
if (pz < 0)
return; // Don't render points behind the camera
const float dx = xf - px;
const float dy = yf - py;
const float dist2 = dx * dx + dy * dy;
if (dist2 < radius2) {
if (q_size < K) {
// Just insert it
q[q_size] = {pz, p_idx, dist2};
if (pz > q_max_z) {
q_max_z = pz;
q_max_idx = q_size;
}
q_size++;
} else if (pz < q_max_z) {
// Overwrite the old max, and find the new max
q[q_max_idx] = {pz, p_idx, dist2};
q_max_z = pz;
for (int i = 0; i < K; i++) {
if (q[i].z > q_max_z) {
q_max_z = q[i].z;
q_max_idx = i;
}
}
}
}
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsNaiveCudaKernel(
const float* points, // (P, 3)
const int64_t* cloud_to_packed_first_idx, // (N)
const int64_t* num_points_per_cloud, // (N)
const float radius,
const int N,
const int S,
const int K,
int32_t* point_idxs, // (N, S, S, K)
float* zbuf, // (N, S, S, K)
float* pix_dists) { // (N, S, S, K)
// Simple version: One thread per output pixel
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const float radius2 = radius * radius;
for (int i = tid; i < N * S * S; i += num_threads) {
// Convert linear index to 3D index
const int n = i / (S * S); // Batch index
const int pix_idx = i % (S * S);
// Reverse ordering of X and Y axes.
const int yi = S - 1 - pix_idx / S;
const int xi = S - 1 - pix_idx % S;
const float xf = PixToNdc(xi, S);
const float yf = PixToNdc(yi, S);
// For keeping track of the K closest points we want a data structure
// that (1) gives O(1) access to the closest point for easy comparisons,
// and (2) allows insertion of new elements. In the CPU version we use
// std::priority_queue; then (2) is O(log K). We can't use STL
// containers in CUDA; we could roll our own max heap in an array, but
// that would likely have a lot of warp divergence so we do something
// simpler instead: keep the elements in an unsorted array, but keep
// track of the max value and the index of the max value. Then (1) is
// still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
// this should be fast enough for our purposes.
// TODO(jcjohns) Abstract this out into a standalone data structure
Pix q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
// Using the batch index of the thread get the start and stop
// indices for the points.
const int64_t point_start_idx = cloud_to_packed_first_idx[n];
const int64_t point_stop_idx = point_start_idx + num_points_per_cloud[n];
for (int p_idx = point_start_idx; p_idx < point_stop_idx; ++p_idx) {
CheckPixelInsidePoint(
points, p_idx, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, K);
}
BubbleSort(q, q_size);
int idx = n * S * S * K + pix_idx * K;
for (int k = 0; k < q_size; ++k) {
point_idxs[idx + k] = q[k].idx;
zbuf[idx + k] = q[k].z;
pix_dists[idx + k] = q[k].dist2;
}
}
}
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
RasterizePointsNaiveCuda(
    const torch::Tensor& points, // (P, 3)
const torch::Tensor& cloud_to_packed_first_idx, // (N)
const torch::Tensor& num_points_per_cloud, // (N)
const int image_size,
const float radius,
const int points_per_pixel) {
if (points.ndimension() != 2 || points.size(1) != 3) {
AT_ERROR("points must have dimensions (num_points, 3)");
}
if (num_points_per_cloud.size(0) != cloud_to_packed_first_idx.size(0)) {
AT_ERROR(
"num_points_per_cloud must have same size first dimension as cloud_to_packed_first_idx");
}
const int N = num_points_per_cloud.size(0); // batch size.
const int S = image_size;
const int K = points_per_pixel;
if (K > kMaxPointsPerPixel) {
std::stringstream ss;
ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
AT_ERROR(ss.str());
}
auto int_opts = points.options().dtype(torch::kInt32);
auto float_opts = points.options().dtype(torch::kFloat32);
torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);
const size_t blocks = 1024;
const size_t threads = 64;
RasterizePointsNaiveCudaKernel<<<blocks, threads>>>(
points.contiguous().data<float>(),
cloud_to_packed_first_idx.contiguous().data<int64_t>(),
num_points_per_cloud.contiguous().data<int64_t>(),
radius,
N,
S,
K,
point_idxs.contiguous().data<int32_t>(),
zbuf.contiguous().data<float>(),
pix_dists.contiguous().data<float>());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
// ****************************************************************************
// * COARSE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsCoarseCudaKernel(
const float* points, // (P, 3)
const int64_t* cloud_to_packed_first_idx, // (N)
const int64_t* num_points_per_cloud, // (N)
const float radius,
const int N,
const int P,
const int S,
const int bin_size,
const int chunk_size,
const int max_points_per_bin,
int* points_per_bin,
int* bin_points) {
extern __shared__ char sbuf[];
const int M = max_points_per_bin;
const int num_bins = 1 + (S - 1) / bin_size; // Integer divide round up
const float half_pix = 1.0f / S; // Size of half a pixel in NDC units
// This is a boolean array of shape (num_bins, num_bins, chunk_size)
// stored in shared memory that will track whether each point in the chunk
// falls into each bin of the image.
BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size);
// Have each block handle a chunk of points and build a 3D bitmask in
// shared memory to mark which points hit which bins. In this first phase,
// each thread processes one point at a time. After processing the chunk,
// one thread is assigned per bin, and the thread counts and writes the
// points for the bin out to global memory.
const int chunks_per_batch = 1 + (P - 1) / chunk_size;
const int num_chunks = N * chunks_per_batch;
for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
const int batch_idx = chunk / chunks_per_batch;
const int chunk_idx = chunk % chunks_per_batch;
const int point_start_idx = chunk_idx * chunk_size;
binmask.block_clear();
// Using the batch index of the thread get the start and stop
// indices for the points.
const int64_t cloud_point_start_idx = cloud_to_packed_first_idx[batch_idx];
const int64_t cloud_point_stop_idx =
cloud_point_start_idx + num_points_per_cloud[batch_idx];
// Have each thread handle a different point within the chunk
for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) {
const int p_idx = point_start_idx + p;
// Check if point index corresponds to the cloud in the batch given by
// batch_idx.
if (p_idx >= cloud_point_stop_idx || p_idx < cloud_point_start_idx) {
continue;
}
const float px = points[p_idx * 3 + 0];
const float py = points[p_idx * 3 + 1];
const float pz = points[p_idx * 3 + 2];
if (pz < 0)
continue; // Don't render points behind the camera.
const float px0 = px - radius;
const float px1 = px + radius;
const float py0 = py - radius;
const float py1 = py + radius;
// Brute-force search over all bins; TODO something smarter?
// For example we could compute the exact bin where the point falls,
// then check neighboring bins. This way we wouldn't have to check
// all bins (however then we might have more warp divergence?)
for (int by = 0; by < num_bins; ++by) {
// Get y extent for the bin. PixToNdc gives us the location of
// the center of each pixel, so we need to add/subtract a half
// pixel to get the true extent of the bin.
// Reverse ordering of Y axis so that +Y is upwards in the image.
const int yidx = num_bins - by;
const float bin_y_max = PixToNdc(yidx * bin_size - 1, S) + half_pix;
const float bin_y_min = PixToNdc((yidx - 1) * bin_size, S) - half_pix;
const bool y_overlap = (py0 <= bin_y_max) && (bin_y_min <= py1);
if (!y_overlap) {
continue;
}
for (int bx = 0; bx < num_bins; ++bx) {
// Get x extent for the bin; again we need to adjust the
// output of PixToNdc by half a pixel.
// Reverse ordering of x axis so that +X is left.
const int xidx = num_bins - bx;
const float bin_x_max = PixToNdc(xidx * bin_size - 1, S) + half_pix;
const float bin_x_min = PixToNdc((xidx - 1) * bin_size, S) - half_pix;
const bool x_overlap = (px0 <= bin_x_max) && (bin_x_min <= px1);
if (x_overlap) {
binmask.set(by, bx, p);
}
}
}
}
__syncthreads();
// Now we have processed every point in the current chunk. We need to
// count the number of points in each bin so we can write the indices
// out to global memory. We have each thread handle a different bin.
for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) {
const int by = byx / num_bins;
const int bx = byx % num_bins;
const int count = binmask.count(by, bx);
const int points_per_bin_idx =
batch_idx * num_bins * num_bins + by * num_bins + bx;
// This atomically increments the (global) number of points found
// in the current bin, and gets the previous value of the counter;
// this effectively allocates space in the bin_points array for the
// points in the current chunk that fall into this bin.
const int start = atomicAdd(points_per_bin + points_per_bin_idx, count);
// Now loop over the binmask and write the active bits for this bin
// out to bin_points.
int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M +
bx * M + start;
for (int p = 0; p < chunk_size; ++p) {
if (binmask.get(by, bx, p)) {
// TODO: Throw an error if next_idx >= M -- this means that
// we got more than max_points_per_bin in this bin
// TODO: check if atomicAdd is needed in line 265.
bin_points[next_idx] = point_start_idx + p;
next_idx++;
}
}
}
__syncthreads();
}
}
torch::Tensor RasterizePointsCoarseCuda(
const torch::Tensor& points, // (P, 3)
const torch::Tensor& cloud_to_packed_first_idx, // (N)
const torch::Tensor& num_points_per_cloud, // (N)
const int image_size,
const float radius,
const int bin_size,
const int max_points_per_bin) {
const int P = points.size(0);
const int N = num_points_per_cloud.size(0);
const int num_bins = 1 + (image_size - 1) / bin_size; // divide round up
const int M = max_points_per_bin;
if (points.ndimension() != 2 || points.size(1) != 3) {
AT_ERROR("points must have dimensions (num_points, 3)");
}
if (num_bins >= 22) {
// Make sure we do not use too much shared memory.
std::stringstream ss;
ss << "Got " << num_bins << "; that's too many!";
AT_ERROR(ss.str());
}
auto opts = points.options().dtype(torch::kInt32);
torch::Tensor points_per_bin = torch::zeros({N, num_bins, num_bins}, opts);
torch::Tensor bin_points = torch::full({N, num_bins, num_bins, M}, -1, opts);
const int chunk_size = 512;
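  // The shared-memory bitmask stores one bit per (bin_y, bin_x, point-in-chunk)
  // entry, hence the division by 8 to convert bits to bytes.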
const size_t shared_size = num_bins * num_bins * chunk_size / 8;
const size_t blocks = 64;
const size_t threads = 512;
RasterizePointsCoarseCudaKernel<<<blocks, threads, shared_size>>>(
points.contiguous().data<float>(),
cloud_to_packed_first_idx.contiguous().data<int64_t>(),
num_points_per_cloud.contiguous().data<int64_t>(),
radius,
N,
P,
image_size,
bin_size,
chunk_size,
M,
points_per_bin.contiguous().data<int32_t>(),
bin_points.contiguous().data<int32_t>());
return bin_points;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsFineCudaKernel(
const float* points, // (P, 3)
const int32_t* bin_points, // (N, B, B, T)
const float radius,
const int bin_size,
const int N,
const int B,
const int M,
const int S,
const int K,
int32_t* point_idxs, // (N, S, S, K)
float* zbuf, // (N, S, S, K)
float* pix_dists) { // (N, S, S, K)
  // This can be more than S^2 if S is not divisible by bin_size.
const int num_pixels = N * B * B * bin_size * bin_size;
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const float radius2 = radius * radius;
for (int pid = tid; pid < num_pixels; pid += num_threads) {
// Convert linear index into bin and pixel indices. We make the within
// block pixel ids move the fastest, so that adjacent threads will fall
// into the same bin; this should give them coalesced memory reads when
// they read from points and bin_points.
int i = pid;
const int n = i / (B * B * bin_size * bin_size);
i %= B * B * bin_size * bin_size;
const int by = i / (B * bin_size * bin_size);
i %= B * bin_size * bin_size;
const int bx = i / (bin_size * bin_size);
i %= bin_size * bin_size;
const int yi = i / bin_size + by * bin_size;
const int xi = i % bin_size + bx * bin_size;
if (yi >= S || xi >= S)
continue;
    // Reverse ordering of the X and Y axes so that
    // in the image +Y is pointing up and +X is pointing left.
const int yidx = S - 1 - yi;
const int xidx = S - 1 - xi;
const float xf = PixToNdc(xidx, S);
const float yf = PixToNdc(yidx, S);
// This part looks like the naive rasterization kernel, except we use
// bin_points to only look at a subset of points already known to fall
// in this bin. TODO abstract out this logic into some data structure
// that is shared by both kernels?
Pix q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
for (int m = 0; m < M; ++m) {
const int p = bin_points[n * B * B * M + by * B * M + bx * M + m];
if (p < 0) {
        // bin_points uses -1 as a sentinel value
continue;
}
CheckPixelInsidePoint(
points, p, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, K);
}
// Now we've looked at all the points for this bin, so we can write
// output for the current pixel.
BubbleSort(q, q_size);
const int pix_idx = n * S * S * K + yi * S * K + xi * K;
for (int k = 0; k < q_size; ++k) {
point_idxs[pix_idx + k] = q[k].idx;
zbuf[pix_idx + k] = q[k].z;
pix_dists[pix_idx + k] = q[k].dist2;
}
}
}
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFineCuda(
const torch::Tensor& points, // (P, 3)
const torch::Tensor& bin_points,
const int image_size,
const float radius,
const int bin_size,
const int points_per_pixel) {
const int N = bin_points.size(0);
const int B = bin_points.size(1);
const int M = bin_points.size(3);
const int S = image_size;
const int K = points_per_pixel;
if (K > kMaxPointsPerPixel) {
AT_ERROR("Must have num_closest <= 8");
}
auto int_opts = points.options().dtype(torch::kInt32);
auto float_opts = points.options().dtype(torch::kFloat32);
torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);
const size_t blocks = 1024;
const size_t threads = 64;
RasterizePointsFineCudaKernel<<<blocks, threads>>>(
points.contiguous().data<float>(),
bin_points.contiguous().data<int32_t>(),
radius,
bin_size,
N,
B,
M,
S,
K,
point_idxs.contiguous().data<int32_t>(),
zbuf.contiguous().data<float>(),
pix_dists.contiguous().data<float>());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO(T55115174) Add more documentation for backward kernel.
__global__ void RasterizePointsBackwardCudaKernel(
const float* points, // (P, 3)
const int32_t* idxs, // (N, H, W, K)
const int N,
const int P,
const int H,
const int W,
const int K,
const float* grad_zbuf, // (N, H, W, K)
const float* grad_dists, // (N, H, W, K)
float* grad_points) { // (P, 3)
// Parallelized over each of K points per pixel, for each pixel in images of
// size H * W, for each image in the batch of size N.
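  // Gradient notes: pix_dists stores dist2 = (px - xf)^2 + (py - yf)^2 and zbuf
  // stores pz, so d(dist2)/d(px) = 2*(px - xf), d(dist2)/d(py) = 2*(py - yf),
  // and d(zbuf)/d(pz) = 1. Each (pixel, k) slot contributes independently, so
  // its gradient is accumulated into the packed points tensor with atomicAdd.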
int num_threads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < N * H * W * K; i += num_threads) {
// const int n = i / (H * W * K); // batch index (not needed).
const int yxk = i % (H * W * K);
const int yi = yxk / (W * K);
const int xk = yxk % (W * K);
const int xi = xk / K;
// k = xk % K (We don't actually need k, but this would be it.)
// Reverse ordering of X and Y axes.
const int yidx = H - 1 - yi;
const int xidx = W - 1 - xi;
const float xf = PixToNdc(xidx, W);
const float yf = PixToNdc(yidx, H);
const int p = idxs[i];
if (p < 0)
continue;
const float grad_dist2 = grad_dists[i];
const int p_ind = p * 3; // index into packed points tensor
const float px = points[p_ind + 0];
const float py = points[p_ind + 1];
const float dx = px - xf;
const float dy = py - yf;
const float grad_px = 2.0f * grad_dist2 * dx;
const float grad_py = 2.0f * grad_dist2 * dy;
const float grad_pz = grad_zbuf[i];
atomicAdd(grad_points + p_ind + 0, grad_px);
atomicAdd(grad_points + p_ind + 1, grad_py);
atomicAdd(grad_points + p_ind + 2, grad_pz);
}
}
torch::Tensor RasterizePointsBackwardCuda(
    const torch::Tensor& points, // (P, 3)
const torch::Tensor& idxs, // (N, H, W, K)
const torch::Tensor& grad_zbuf, // (N, H, W, K)
const torch::Tensor& grad_dists) { // (N, H, W, K)
const int P = points.size(0);
const int N = idxs.size(0);
const int H = idxs.size(1);
const int W = idxs.size(2);
const int K = idxs.size(3);
torch::Tensor grad_points = torch::zeros({P, 3}, points.options());
const size_t blocks = 1024;
const size_t threads = 64;
RasterizePointsBackwardCudaKernel<<<blocks, threads>>>(
points.contiguous().data<float>(),
idxs.contiguous().data<int32_t>(),
N,
P,
H,
W,
K,
grad_zbuf.contiguous().data<float>(),
grad_dists.contiguous().data<float>(),
grad_points.contiguous().data<float>());
return grad_points;
}
|
dec0f72a7f9e56cdd54eb0d1183e882eb8db818a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <unistd.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
//#define GLOBAL_MEM_ELEMENTS 131072
//#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
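// A single (tid == 0) thread builds, within each warp's slice of the array, a
// cyclic pointer chain with a stride of 16 elements: array[i] ends up holding
// the address of the element 16 positions ahead (modulo the slice length).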
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k, unsigned long long ** my_end_ptr_array) {
unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
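// The REPEAT_* macros below expand into a long chain of dependent pointer
// dereferences (pointer chasing, 3*64 + 2*4 = 200 per timed iteration here);
// since every load depends on the previous one, the measured clock() cycles
// divided by the number of dereferences approximates one memory-access latency.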
//#define ONCE tmp_ptr = *(void**)tmp_ptr;
#define ONCE tmp_ptr = (void**)(*tmp_ptr);
#define REPEAT_FOUR_TIMES ONCE ONCE ONCE ONCE
#define REPEAT_SIXTEEN_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES
#define REPEAT_SIXTYFOUR_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES
if ((threadIdx.x % 32) < divergence) {
for(k = 0; k <= iterations; k++) {
// tmp_ptr = (void**)(*tmp_ptr);
if (k == 0) {
sum_time = 0;
}
start_time = clock();
// ONCE
REPEAT_SIXTYFOUR_TIMES;
REPEAT_SIXTYFOUR_TIMES;
REPEAT_SIXTYFOUR_TIMES;
REPEAT_FOUR_TIMES;
REPEAT_FOUR_TIMES;
end_time = clock();
sum_time += (end_time - start_time);
}
}
my_end_ptr_array[tid] = (unsigned long long*)(*tmp_ptr);
duration[tid] = sum_time;
}
// Measures memory access latency by pointer-chasing over an array of N elements.
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long ** h_end_ptr_a;
unsigned long long ** d_end_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
h_end_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipMalloc ((void **) &d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipMemcpy((void *)d_end_ptr_a, (void *)h_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
for (int i = 0; i < 1; i++) {
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block, d_end_ptr_a);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipMemcpy((void *)h_end_ptr_a, (void *)d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
if (latency[i] > 0) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
}
printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * (divergence / 32.0) * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time);
//printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time);
//printf("%f\n", time);
}
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(d_end_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(h_end_ptr_a);
free(latency);
}
void usage() {
    printf("Usage: ./binary <num_blocks> <num_threads_per_block> <iterations> <threads active per warp> <stride>\n");
}
int main(int argc, char **argv) {
int N, stride;
// initialize upper bounds here
// int stride_upper_bound = 1;
if(argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
stride = atoi(argv[5]);
// printf("Shared memory latency for varying stride.\n");
// printf("stride (bytes), latency (clocks)\n");
// N = SHARED_MEM_ELEMENTS;
N = GLOBAL_MEM_ELEMENTS;
// N = num_threads_per_block;
// stride_upper_bound = 1;
// for (stride = 1; stride <= stride_upper_bound; stride += 1) {
parametric_measure_shared(N, 10, stride);
// }
return 0;
}
|
dec0f72a7f9e56cdd54eb0d1183e882eb8db818a.cu
|
#include <stdio.h>
#include <cuda_profiler_api.h>
#include <unistd.h>
#include <curand.h>
#include <curand_kernel.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
//#define GLOBAL_MEM_ELEMENTS 131072
//#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
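// A single (tid == 0) thread builds, within each warp's slice of the array, a
// cyclic pointer chain with a stride of 16 elements: array[i] ends up holding
// the address of the element 16 positions ahead (modulo the slice length).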
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k, unsigned long long ** my_end_ptr_array) {
unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
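// The REPEAT_* macros below expand into a long chain of dependent pointer
// dereferences (pointer chasing, 3*64 + 2*4 = 200 per timed iteration here);
// since every load depends on the previous one, the measured clock() cycles
// divided by the number of dereferences approximates one memory-access latency.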
//#define ONCE tmp_ptr = *(void**)tmp_ptr;
#define ONCE tmp_ptr = (void**)(*tmp_ptr);
#define REPEAT_FOUR_TIMES ONCE ONCE ONCE ONCE
#define REPEAT_SIXTEEN_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES
#define REPEAT_SIXTYFOUR_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES
if ((threadIdx.x % 32) < divergence) {
for(k = 0; k <= iterations; k++) {
// tmp_ptr = (void**)(*tmp_ptr);
if (k == 0) {
sum_time = 0;
}
start_time = clock();
// ONCE
REPEAT_SIXTYFOUR_TIMES;
REPEAT_SIXTYFOUR_TIMES;
REPEAT_SIXTYFOUR_TIMES;
REPEAT_FOUR_TIMES;
REPEAT_FOUR_TIMES;
end_time = clock();
sum_time += (end_time - start_time);
}
}
my_end_ptr_array[tid] = (unsigned long long*)(*tmp_ptr);
duration[tid] = sum_time;
}
// Measures memory access latency by pointer-chasing over an array of N elements.
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long ** h_end_ptr_a;
unsigned long long ** d_end_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
h_end_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaMalloc ((void **) &d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_end_ptr_a, (void *)h_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
for (int i = 0; i < 1; i++) {
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block, d_end_ptr_a);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)h_end_ptr_a, (void *)d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
if (latency[i] > 0) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
}
printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * (divergence / 32.0) * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time);
//printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time);
//printf("%f\n", time);
}
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(d_end_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(h_end_ptr_a);
free(latency);
}
void usage() {
    printf("Usage: ./binary <num_blocks> <num_threads_per_block> <iterations> <threads active per warp> <stride>\n");
}
int main(int argc, char **argv) {
int N, stride;
// initialize upper bounds here
// int stride_upper_bound = 1;
if(argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
stride = atoi(argv[5]);
// printf("Shared memory latency for varying stride.\n");
// printf("stride (bytes), latency (clocks)\n");
// N = SHARED_MEM_ELEMENTS;
N = GLOBAL_MEM_ELEMENTS;
// N = num_threads_per_block;
// stride_upper_bound = 1;
// for (stride = 1; stride <= stride_upper_bound; stride += 1) {
parametric_measure_shared(N, 10, stride);
// }
return 0;
}
|
b3fbbe1bc5fe9621ab817aaa501088e6ff780d4c.hip
|
// !!! This is a file automatically generated by hipify!!!
// Runcmd : /usr/local/cuda/bin/nvcc cublas_matmul.cu -o matmul -lcublas -lcurand -std=c++14
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <chrono>
#define MATRIX_M 1024
#define MATRIX_N 512
#define MATRIX_K 256
#define DATATYPE float
#define EPSILON 1e-2
// Is input transposed => row-major
// cublas always assumes column major matrices by default
#define A_T false
#define B_T false
#define C_T false
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
exit(-1);}} while(0)
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
exit(-1);}} while(0)
#define CUBLAS_CALL(x) do { if((x)!=HIPBLAS_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
exit(-1);}} while(0)
// Fill Values using hiprand
void init_vals(DATATYPE *in, int N)
{
hiprandGenerator_t prng;
CURAND_CALL( hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT) );
CURAND_CALL( hiprandSetPseudoRandomGeneratorSeed(prng, 1234ULL) );
CURAND_CALL( hiprandGenerateUniform(prng, in, N) );
CURAND_CALL( hiprandDestroyGenerator(prng) );
}
// Cublas call
float cublas_matmul(const DATATYPE *A, const DATATYPE *B, DATATYPE *C, const int m, const int n, const int k)
{
// Events to measure performance
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
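    // cuBLAS/hipBLAS assume column-major storage; an input flagged as transposed
    // (i.e. stored row-major) is passed with OP_T, and its leading dimension is
    // chosen below to match the physical layout.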
int lda = A_T ? k : m;
int ldb = B_T ? n : k;
int ldc = C_T ? n : m;
const DATATYPE alpha = 1;
const DATATYPE beta = 0;
// STEP 1: Create cuBLAS Handle
hipblasHandle_t handle;
CUBLAS_CALL( hipblasCreate(&handle) );
// STEP 2 : Call cuBLAS command
hipEventRecord(start);
if( A_T ) {
if( B_T ) {
CUBLAS_CALL( hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc) );
} else {
CUBLAS_CALL( hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc) );
}
} else{
if( B_T ) {
CUBLAS_CALL( hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc) );
} else {
CUBLAS_CALL( hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc) );
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float ms = 0;
hipEventElapsedTime(&ms, start, stop);
// STEP 3 : Destroy Handle
CUBLAS_CALL( hipblasDestroy(handle) );
return ms;
}
// CPU Verification
inline int get_leading_dimension(int num_rows, int num_cols, bool is_row_major){
return is_row_major ? num_cols : num_rows;
}
DATATYPE& get_matrix_ref(DATATYPE* matrix, int row, int col, int num_rows, int num_cols, bool is_row_major = false)
{
int leading_dim = get_leading_dimension(num_rows, num_cols, is_row_major);
if( is_row_major ) {
return matrix[ row * leading_dim + col];
} else {
return matrix[ col * leading_dim + row];
}
}
DATATYPE get_matrix_val(const DATATYPE* matrix, int row, int col, int num_rows, int num_cols, bool is_row_major = false)
{
int leading_dim = get_leading_dimension(num_rows, num_cols, is_row_major);
if( is_row_major ) {
return matrix[ row * leading_dim + col];
} else {
return matrix[ col * leading_dim + row];
}
}
DATATYPE cpu_verify(const DATATYPE *A, const DATATYPE *B, DATATYPE *C, const int m_, const int n_, const int k_)
{
auto cpu_start = std::chrono::steady_clock::now();
for (int row = 0; row < m_; row++) {
for (int col = 0; col < n_; col++) {
DATATYPE& out_c = get_matrix_ref(C, row, col, m_, n_, C_T);
out_c = 0;
for (int ki = 0; ki < k_; ki++) {
DATATYPE in_a = get_matrix_val(A, row, ki, m_, k_, A_T);
DATATYPE in_b = get_matrix_val(B, ki, col, k_, n_, B_T);
out_c += in_a * in_b;
}
}
}
printf("------\n");
auto cpu_end = std::chrono::steady_clock::now();
float cpu_ms = std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count() * 1e-6;
return cpu_ms;
}
// Prints the matrix row by row, regardless of the underlying storage layout.
void print_matrix(const DATATYPE *mat, int num_rows, int num_cols, bool is_row_major = false)
{
for (int row = 0; row < num_rows; row++) {
for (int col = 0; col < num_cols; col++) {
float val = get_matrix_val(mat, row, col, num_rows, num_cols, is_row_major);
std::cout<< val << " ";
}
std::cout << ";\n";
}
}
int main()
{
// Declare device side vectors
thrust::device_vector<DATATYPE> d_A(MATRIX_M * MATRIX_K);
thrust::device_vector<DATATYPE> d_B(MATRIX_K * MATRIX_N);
thrust::device_vector<DATATYPE> d_C(MATRIX_M * MATRIX_N);
// Initialize values using hiprand
init_vals(thrust::raw_pointer_cast(d_A.data()), MATRIX_M * MATRIX_K);
init_vals(thrust::raw_pointer_cast(d_B.data()), MATRIX_K * MATRIX_N);
// Perform Matrix Multiply on the GPU
float gpu_time = cublas_matmul(thrust::raw_pointer_cast(d_A.data()),
thrust::raw_pointer_cast(d_B.data()),
thrust::raw_pointer_cast(d_C.data()),
MATRIX_M, MATRIX_N, MATRIX_K);
// Declare host vectors
thrust::host_vector<DATATYPE> h_A(MATRIX_M * MATRIX_K);
thrust::host_vector<DATATYPE> h_B(MATRIX_K * MATRIX_N);
thrust::host_vector<DATATYPE> h_C(MATRIX_M * MATRIX_N);
thrust::host_vector<DATATYPE> h_C_computed(MATRIX_M * MATRIX_N);
// Copy device data to host
h_A = d_A;
h_B = d_B;
h_C_computed = d_C;
// Verify operation on the CPU
float cpu_time = cpu_verify( thrust::raw_pointer_cast(h_A.data()),
thrust::raw_pointer_cast(h_B.data()),
thrust::raw_pointer_cast(h_C.data()),
MATRIX_M, MATRIX_N, MATRIX_K);
for(int i = 0; i < MATRIX_M * MATRIX_N; i++){
if (abs(h_C[i] - h_C_computed[i]) > EPSILON) {
std::cout << "Mismatch at " << i << " Expected = " << h_C[i] << " Actual = " << h_C_computed[i] << std::endl;
std::cout << "A :" << std::endl;
print_matrix( thrust::raw_pointer_cast(h_A.data()), MATRIX_M, MATRIX_K, A_T);
std::cout << "B :" << std::endl;
print_matrix( thrust::raw_pointer_cast(h_B.data()), MATRIX_K, MATRIX_N, B_T);
std::cout << "C Ref :" << std::endl;
print_matrix( thrust::raw_pointer_cast(h_C.data()), MATRIX_M, MATRIX_N, C_T);
std::cout << "C Computed :" << std::endl;
print_matrix( thrust::raw_pointer_cast(h_C_computed.data()), MATRIX_M, MATRIX_N, C_T);
break;
}
}
std::cout << "TEST COMPLETED \n"
<< "CPU Time : " << cpu_time << " ms\n"
              << "GPU Time : " << gpu_time << " ms"
<< std::endl;
}
|
b3fbbe1bc5fe9621ab817aaa501088e6ff780d4c.cu
|
// Runcmd : /usr/local/cuda/bin/nvcc cublas_matmul.cu -o matmul -lcublas -lcurand -std=c++14
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <curand.h>
#include <cublas_v2.h>
#include <chrono>
#define MATRIX_M 1024
#define MATRIX_N 512
#define MATRIX_K 256
#define DATATYPE float
#define EPSILON 1e-2
// Is input transposed => row-major
// cublas always assumes column major matrices by default
#define A_T false
#define B_T false
#define C_T false
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
exit(-1);}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
exit(-1);}} while(0)
#define CUBLAS_CALL(x) do { if((x)!=CUBLAS_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
exit(-1);}} while(0)
// Fill Values using curand
void init_vals(DATATYPE *in, int N)
{
curandGenerator_t prng;
CURAND_CALL( curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT) );
CURAND_CALL( curandSetPseudoRandomGeneratorSeed(prng, 1234ULL) );
CURAND_CALL( curandGenerateUniform(prng, in, N) );
CURAND_CALL( curandDestroyGenerator(prng) );
}
// Cublas call
float cublas_matmul(const DATATYPE *A, const DATATYPE *B, DATATYPE *C, const int m, const int n, const int k)
{
// Events to measure performance
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
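    // cuBLAS assumes column-major storage; an input flagged as transposed (i.e.
    // stored row-major) is passed with CUBLAS_OP_T, and its leading dimension is
    // chosen below to match the physical layout.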
int lda = A_T ? k : m;
int ldb = B_T ? n : k;
int ldc = C_T ? n : m;
const DATATYPE alpha = 1;
const DATATYPE beta = 0;
// STEP 1: Create cuBLAS Handle
cublasHandle_t handle;
CUBLAS_CALL( cublasCreate(&handle) );
// STEP 2 : Call cuBLAS command
cudaEventRecord(start);
if( A_T ) {
if( B_T ) {
CUBLAS_CALL( cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc) );
} else {
CUBLAS_CALL( cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc) );
}
} else{
if( B_T ) {
CUBLAS_CALL( cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc) );
} else {
CUBLAS_CALL( cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc) );
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
// STEP 3 : Destroy Handle
CUBLAS_CALL( cublasDestroy(handle) );
return ms;
}
// CPU Verification
inline int get_leading_dimension(int num_rows, int num_cols, bool is_row_major){
return is_row_major ? num_cols : num_rows;
}
DATATYPE& get_matrix_ref(DATATYPE* matrix, int row, int col, int num_rows, int num_cols, bool is_row_major = false)
{
int leading_dim = get_leading_dimension(num_rows, num_cols, is_row_major);
if( is_row_major ) {
return matrix[ row * leading_dim + col];
} else {
return matrix[ col * leading_dim + row];
}
}
DATATYPE get_matrix_val(const DATATYPE* matrix, int row, int col, int num_rows, int num_cols, bool is_row_major = false)
{
int leading_dim = get_leading_dimension(num_rows, num_cols, is_row_major);
if( is_row_major ) {
return matrix[ row * leading_dim + col];
} else {
return matrix[ col * leading_dim + row];
}
}
DATATYPE cpu_verify(const DATATYPE *A, const DATATYPE *B, DATATYPE *C, const int m_, const int n_, const int k_)
{
auto cpu_start = std::chrono::steady_clock::now();
for (int row = 0; row < m_; row++) {
for (int col = 0; col < n_; col++) {
DATATYPE& out_c = get_matrix_ref(C, row, col, m_, n_, C_T);
out_c = 0;
for (int ki = 0; ki < k_; ki++) {
DATATYPE in_a = get_matrix_val(A, row, ki, m_, k_, A_T);
DATATYPE in_b = get_matrix_val(B, ki, col, k_, n_, B_T);
out_c += in_a * in_b;
}
}
}
printf("------\n");
auto cpu_end = std::chrono::steady_clock::now();
float cpu_ms = std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count() * 1e-6;
return cpu_ms;
}
// Prints the matrix row by row, regardless of the underlying storage layout.
void print_matrix(const DATATYPE *mat, int num_rows, int num_cols, bool is_row_major = false)
{
for (int row = 0; row < num_rows; row++) {
for (int col = 0; col < num_cols; col++) {
float val = get_matrix_val(mat, row, col, num_rows, num_cols, is_row_major);
std::cout<< val << " ";
}
std::cout << ";\n";
}
}
int main()
{
// Declare device side vectors
thrust::device_vector<DATATYPE> d_A(MATRIX_M * MATRIX_K);
thrust::device_vector<DATATYPE> d_B(MATRIX_K * MATRIX_N);
thrust::device_vector<DATATYPE> d_C(MATRIX_M * MATRIX_N);
// Initialize values using curand
init_vals(thrust::raw_pointer_cast(d_A.data()), MATRIX_M * MATRIX_K);
init_vals(thrust::raw_pointer_cast(d_B.data()), MATRIX_K * MATRIX_N);
// Perform Matrix Multiply on the GPU
float gpu_time = cublas_matmul(thrust::raw_pointer_cast(d_A.data()),
thrust::raw_pointer_cast(d_B.data()),
thrust::raw_pointer_cast(d_C.data()),
MATRIX_M, MATRIX_N, MATRIX_K);
// Declare host vectors
thrust::host_vector<DATATYPE> h_A(MATRIX_M * MATRIX_K);
thrust::host_vector<DATATYPE> h_B(MATRIX_K * MATRIX_N);
thrust::host_vector<DATATYPE> h_C(MATRIX_M * MATRIX_N);
thrust::host_vector<DATATYPE> h_C_computed(MATRIX_M * MATRIX_N);
// Copy device data to host
h_A = d_A;
h_B = d_B;
h_C_computed = d_C;
// Verify operation on the CPU
float cpu_time = cpu_verify( thrust::raw_pointer_cast(h_A.data()),
thrust::raw_pointer_cast(h_B.data()),
thrust::raw_pointer_cast(h_C.data()),
MATRIX_M, MATRIX_N, MATRIX_K);
for(int i = 0; i < MATRIX_M * MATRIX_N; i++){
if (abs(h_C[i] - h_C_computed[i]) > EPSILON) {
std::cout << "Mismatch at " << i << " Expected = " << h_C[i] << " Actual = " << h_C_computed[i] << std::endl;
std::cout << "A :" << std::endl;
print_matrix( thrust::raw_pointer_cast(h_A.data()), MATRIX_M, MATRIX_K, A_T);
std::cout << "B :" << std::endl;
print_matrix( thrust::raw_pointer_cast(h_B.data()), MATRIX_K, MATRIX_N, B_T);
std::cout << "C Ref :" << std::endl;
print_matrix( thrust::raw_pointer_cast(h_C.data()), MATRIX_M, MATRIX_N, C_T);
std::cout << "C Computed :" << std::endl;
print_matrix( thrust::raw_pointer_cast(h_C_computed.data()), MATRIX_M, MATRIX_N, C_T);
break;
}
}
std::cout << "TEST COMPLETED \n"
<< "CPU Time : " << cpu_time << " ms\n"
              << "GPU Time : " << gpu_time << " ms"
<< std::endl;
}
|
99172081ed638b6ed6d7ad3c86b3a420c8816335.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernels.hip"
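// Example invocation (hypothetical values): ./probe 128 128 128 10
// i.e. a 128^3 grid run for 10 iterations; see the usage message in main().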
static int read_data(float *A0, int nx,int ny,int nz)
{
int s=0;
for(int i=0;i<nz;i++)
{
for(int j=0;j<ny;j++)
{
for(int k=0;k<nx;k++)
{
A0[s] = 3; //fread(A0+s,sizeof(float),1,fp);
s++;
}
}
}
return 0;
}
int main(int argc, char** argv) {
//declaration
int nx,ny,nz;
int size;
int iteration;
float c0=1.0f/6.0f;
float c1=1.0f/6.0f/6.0f;
if (argc<5)
{
    printf("Usage: probe nx ny nz t\n"
           "nx: the grid size x\n"
           "ny: the grid size y\n"
           "nz: the grid size z\n"
           "t: the number of iterations\n");
return -1;
}
nx = atoi(argv[1]);
if (nx<1)
return -1;
ny = atoi(argv[2]);
if (ny<1)
return -1;
nz = atoi(argv[3]);
if (nz<1)
return -1;
iteration = atoi(argv[4]);
if(iteration<1)
return -1;
//host data
float *h_A0;
float *h_Anext;
//device
float *d_A0;
float *d_Anext;
size=nx*ny*nz;
h_A0=(float*)malloc(sizeof(float)*size);
h_Anext=(float*)malloc(sizeof(float)*size);
read_data(h_A0, nx,ny,nz);
//memory allocation
hipMalloc((void **)&d_A0, size*sizeof(float));
hipMalloc((void **)&d_Anext, size*sizeof(float));
hipMemset(d_Anext,0,size*sizeof(float));
//memory copy
hipMemcpy(d_A0, h_A0, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Anext, d_A0, size*sizeof(float), hipMemcpyDeviceToDevice);
//only use tx-by-ty threads
int tx=32;
int ty=4;
dim3 block (tx, ty, 1);
// also change the thread-to-data mapping from tx x ty to 2*tx x ty (each thread handles two x-elements)
dim3 grid ((nx+tx*2-1)/(tx*2), (ny+ty-1)/ty,1);
int sh_size = tx*2*ty*sizeof(float);
//main execution
for(int t=0;t<iteration;t++)
{
hipLaunchKernelGGL(( block2D_hybrid_coarsen_x), dim3(grid), dim3(block),sh_size, 0, c0,c1, d_A0, d_Anext, nx, ny, nz);
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
}
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
hipMemcpy(h_Anext, d_Anext,size*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_A0);
hipFree(d_Anext);
free (h_A0);
free (h_Anext);
return 0;
}
|
99172081ed638b6ed6d7ad3c86b3a420c8816335.cu
|
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernels.cu"
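// Example invocation (hypothetical values): ./probe 128 128 128 10
// i.e. a 128^3 grid run for 10 iterations; see the usage message in main().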
static int read_data(float *A0, int nx,int ny,int nz)
{
int s=0;
for(int i=0;i<nz;i++)
{
for(int j=0;j<ny;j++)
{
for(int k=0;k<nx;k++)
{
A0[s] = 3; //fread(A0+s,sizeof(float),1,fp);
s++;
}
}
}
return 0;
}
int main(int argc, char** argv) {
//declaration
int nx,ny,nz;
int size;
int iteration;
float c0=1.0f/6.0f;
float c1=1.0f/6.0f/6.0f;
if (argc<5)
{
    printf("Usage: probe nx ny nz t\n"
           "nx: the grid size x\n"
           "ny: the grid size y\n"
           "nz: the grid size z\n"
           "t: the number of iterations\n");
return -1;
}
nx = atoi(argv[1]);
if (nx<1)
return -1;
ny = atoi(argv[2]);
if (ny<1)
return -1;
nz = atoi(argv[3]);
if (nz<1)
return -1;
iteration = atoi(argv[4]);
if(iteration<1)
return -1;
//host data
float *h_A0;
float *h_Anext;
//device
float *d_A0;
float *d_Anext;
size=nx*ny*nz;
h_A0=(float*)malloc(sizeof(float)*size);
h_Anext=(float*)malloc(sizeof(float)*size);
read_data(h_A0, nx,ny,nz);
//memory allocation
cudaMalloc((void **)&d_A0, size*sizeof(float));
cudaMalloc((void **)&d_Anext, size*sizeof(float));
cudaMemset(d_Anext,0,size*sizeof(float));
//memory copy
cudaMemcpy(d_A0, h_A0, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Anext, d_A0, size*sizeof(float), cudaMemcpyDeviceToDevice);
//only use tx-by-ty threads
int tx=32;
int ty=4;
dim3 block (tx, ty, 1);
//also change threads size maping from tx by ty to 2tx x ty
dim3 grid ((nx+tx*2-1)/(tx*2), (ny+ty-1)/ty,1);
int sh_size = tx*2*ty*sizeof(float);
//main execution
for(int t=0;t<iteration;t++)
{
block2D_hybrid_coarsen_x<<<grid, block,sh_size>>>(c0,c1, d_A0, d_Anext, nx, ny, nz);
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
}
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
cudaMemcpy(h_Anext, d_Anext,size*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_A0);
cudaFree(d_Anext);
free (h_A0);
free (h_Anext);
return 0;
}
|
c5edef39fd05feaf39b75b8b5d0639c4674aca0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "energyMinimizerNesterovAG.cuh"
/*! \file energyMinimizerNesterovAG.cu
\addtogroup updaterKernels
@{
*/
__global__ void gpu_nesterovAG_step_kernel(dVec *force,
dVec *position,
dVec *alternatePosition,
scalar deltaT,
scalar mu,
int N)
{
// read in the index that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
int pidx = idx/DIMENSION;
if(pidx>=N) return;
int didx = idx%DIMENSION;
scalar f = force[pidx][didx];
scalar oldAltPos = alternatePosition[pidx][didx];
alternatePosition[pidx][didx] = position[pidx][didx] + deltaT*f;
position[pidx][didx] = alternatePosition[pidx][didx] + mu*(alternatePosition[pidx][didx] - oldAltPos);
}
/*!
A memory-efficiency optimization has each thread acting on one dimension of one degree of freedom...
*/
bool gpu_nesterovAG_step(dVec *force,
dVec *position,
dVec *alternatePosition,
scalar deltaT,
scalar mu,
int N,
int blockSize)
{
int block_size=blockSize;
if (N < 128) block_size = 32;
unsigned int nblocks = DIMENSION*N/block_size + 1;
hipLaunchKernelGGL(( gpu_nesterovAG_step_kernel), dim3(nblocks),dim3(block_size), 0, 0, force,position,alternatePosition,
deltaT,mu,N);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
};
/** @} */ //end of group declaration
|
c5edef39fd05feaf39b75b8b5d0639c4674aca0a.cu
|
#include "energyMinimizerNesterovAG.cuh"
/*! \file energyMinimizerNesterovAG.cu
\addtogroup updaterKernels
@{
*/
__global__ void gpu_nesterovAG_step_kernel(dVec *force,
dVec *position,
dVec *alternatePosition,
scalar deltaT,
scalar mu,
int N)
{
// read in the index that belongs to this thread
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
int pidx = idx/DIMENSION;
if(pidx>=N) return;
int didx = idx%DIMENSION;
scalar f = force[pidx][didx];
scalar oldAltPos = alternatePosition[pidx][didx];
alternatePosition[pidx][didx] = position[pidx][didx] + deltaT*f;
position[pidx][didx] = alternatePosition[pidx][didx] + mu*(alternatePosition[pidx][didx] - oldAltPos);
}
/*!
A memory-efficiency optimization has each thread acting on one dimension of one degree of freedom...
*/
bool gpu_nesterovAG_step(dVec *force,
dVec *position,
dVec *alternatePosition,
scalar deltaT,
scalar mu,
int N,
int blockSize)
{
int block_size=blockSize;
if (N < 128) block_size = 32;
unsigned int nblocks = DIMENSION*N/block_size + 1;
gpu_nesterovAG_step_kernel<<<nblocks,block_size>>>(force,position,alternatePosition,
deltaT,mu,N);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
};
/** @} */ //end of group declaration
|
2bb85b46783173d03cd37a952ba5ab2c9952e7c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
#include <limits>
#include <cstdlib>
#include "../utils.h"
#include "../timer.h"
__device__
int nextPowerOf2(const int x) {
return (1 << (32 - __clz(x - 1)));
}
template<int blockSize>
__device__
int blockReduce(const int threadVal, int *smem) {
const int tid = threadIdx.x;
smem[tid] = threadVal;
__syncthreads();
//use this for non-power of 2 blockSizes
for (int shift = nextPowerOf2(blockSize) / 2; shift > 0; shift >>= 1) {
if (tid < shift && tid + shift < blockSize) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
return smem[0];
}
template<int blockSize>
__global__
void reduceMultiBlock(const int* const input, int *sum, int N)
{
const int tid = threadIdx.x;
const int gtid = blockIdx.x * blockSize + threadIdx.x;
__shared__ int smem[blockSize];
int myVal = 0;
//first do a serial accumulation into each thread's local accumulator
for (int globalPos = gtid; globalPos < N; globalPos += blockSize * gridDim.x) {
myVal += input[globalPos];
}
int blockSum = blockReduce<blockSize>(myVal, smem);
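// thread 0 of each block folds its partial sum into the global result with one atomicAdd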
if (tid == 0)
atomicAdd(sum, blockSum);
}
int main(int argc, char **argv) {
int N = atoi(argv[1]);
std::vector<int> h_input(N);
int h_sum = 0;
for (int i = 0; i < N; ++i) {
h_input[i] = rand() % 10;
h_sum += h_input[i];
}
int *d_input;
checkCudaErrors(hipMalloc(&d_input, N * sizeof(int)));
checkCudaErrors(hipMemcpy(d_input, &h_input[0], N * sizeof(int), hipMemcpyHostToDevice));
int *d_sum;
checkCudaErrors(hipMalloc(&d_sum, sizeof(int)));
checkCudaErrors(hipMemset(d_sum, 0, sizeof(int)));
const int blockSize = 192;
const int numBlocks = min( ((N+1)/2 + blockSize - 1) / blockSize, 512);
GpuTimer timer; timer.Start();
hipLaunchKernelGGL(( reduceMultiBlock<blockSize>), dim3(numBlocks), dim3(blockSize), 0, 0, d_input, d_sum, N);
timer.Stop();
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
int h_d_sum;
checkCudaErrors(hipMemcpy(&h_d_sum, d_sum, sizeof(int), hipMemcpyDeviceToHost));
std::cout << "time: " << timer.Elapsed() << " ms" << std::endl;
std::cout << "Bandwidth: " << (N * sizeof(int) / 1E6) / timer.Elapsed() << std::endl;
std::cout << "cpu: " << h_sum << " gpu: " << h_d_sum << std::endl;
return 0;
}
|
2bb85b46783173d03cd37a952ba5ab2c9952e7c5.cu
|
#include <vector>
#include <iostream>
#include <limits>
#include <cstdlib>
#include "../utils.h"
#include "../timer.h"
__device__
int nextPowerOf2(const int x) {
return (1 << (32 - __clz(x - 1)));
}
template<int blockSize>
__device__
int blockReduce(const int threadVal, int *smem) {
const int tid = threadIdx.x;
smem[tid] = threadVal;
__syncthreads();
//use this for non-power of 2 blockSizes
for (int shift = nextPowerOf2(blockSize) / 2; shift > 0; shift >>= 1) {
if (tid < shift && tid + shift < blockSize) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
return smem[0];
}
template<int blockSize>
__global__
void reduceMultiBlock(const int* const input, int *sum, int N)
{
const int tid = threadIdx.x;
const int gtid = blockIdx.x * blockSize + threadIdx.x;
__shared__ int smem[blockSize];
int myVal = 0;
//first do a serial accumulation into each thread's local accumulator
for (int globalPos = gtid; globalPos < N; globalPos += blockSize * gridDim.x) {
myVal += input[globalPos];
}
int blockSum = blockReduce<blockSize>(myVal, smem);
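// thread 0 of each block folds its partial sum into the global result with one atomicAdd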
if (tid == 0)
atomicAdd(sum, blockSum);
}
int main(int argc, char **argv) {
int N = atoi(argv[1]);
std::vector<int> h_input(N);
int h_sum = 0;
for (int i = 0; i < N; ++i) {
h_input[i] = rand() % 10;
h_sum += h_input[i];
}
int *d_input;
checkCudaErrors(cudaMalloc(&d_input, N * sizeof(int)));
checkCudaErrors(cudaMemcpy(d_input, &h_input[0], N * sizeof(int), cudaMemcpyHostToDevice));
int *d_sum;
checkCudaErrors(cudaMalloc(&d_sum, sizeof(int)));
checkCudaErrors(cudaMemset(d_sum, 0, sizeof(int)));
const int blockSize = 192;
const int numBlocks = min( ((N+1)/2 + blockSize - 1) / blockSize, 512);
GpuTimer timer; timer.Start();
reduceMultiBlock<blockSize><<<numBlocks, blockSize>>>(d_input, d_sum, N);
timer.Stop();
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
int h_d_sum;
checkCudaErrors(cudaMemcpy(&h_d_sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost));
std::cout << "time: " << timer.Elapsed() << " ms" << std::endl;
std::cout << "Bandwidth: " << (N * sizeof(int) / 1E6) / timer.Elapsed() << std::endl;
std::cout << "cpu: " << h_sum << " gpu: " << h_d_sum << std::endl;
return 0;
}
|
de053397804befba86dea034dc07279809174cef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include <assert.h>
#include <iostream>
#include <hip/hip_runtime.h>
using std::cout;
#define GNU_C_COMPILER
#if defined(GNU_C_COMPILER)
extern "C" {
#include "cblas.h"
#include "lapacke.h"
#include "lapacke_mangling.h"
}
#elif defined(INTEL_C_COMPILER)
#include "mkl.h"
#endif
//#define VERBOSITY
using std::cout;
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
#define nullptr NULL
#define safeCall(err) __safeCall(err, __FILE__, __LINE__)
inline void __safeCall(hipError_t err, const char * file, const int line)
{
if(hipSuccess != err) {
fprintf(stderr, "ERROR: safeCall() Runtime API error in file <%s>, line %i : %s.\n", file , line, hipGetErrorString(err));
exit(-1);
}
}
class TimerGPU {
public:
hipEvent_t start, stop;
hipStream_t stream;
TimerGPU(hipStream_t stream_ = 0) : stream(stream_) {
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, stream);
}
~TimerGPU() {
hipEventDestroy(start);
hipEventDestroy(stop);
}
float read() {
hipEventRecord(stop, stream);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time, start, stop);
return time;
}
};
class TimerCPU {
static const int bits = 10;
public:
long long beg_clock;
float freq;
TimerCPU(float freq_) : freq(freq_) {
beg_clock = getTSC(bits);
}
long long getTSC(int bits) {
#ifdef WIN32
return __rdtsc();
#else
unsigned int low, high;
__asm__(".byte 0x0f, 0x31" :"=a" (low), "=d" (high));
return ((long long)high<<(32 - bits)) | ((long long)low >> bits);
#endif
}
float read() {
long long end_clock = getTSC(bits);
long long Kcycles = end_clock - beg_clock;
float time = (float)(1 << bits) * Kcycles / freq / 1e3f;
return time;
}
};
int iDivUp(int a, int b);
int iDivDown(int a, int b);
int iAlignUp(int a, int b);
int iAlignDown(int a, int b);
template<size_t BX, size_t BY>
class CudaMatrix {
public:
CudaMatrix();
~CudaMatrix();
void allocate(const int M_, const int N_, bool host, float * devmem, float * hostmem);
double download();
double readback();
public:
int M, N;
int padM, padN;
float * h_data;
float * d_data;
bool h_internalAlloc;
bool d_internalAlloc;
};
int iDivUp(int a, int b) { return (a % b == 0) ? (a / b) : (a / b + 1); }
int iDivDown(int a, int b) { return a / b; }
int iAlignUp(int a, int b) { return (a % b == 0) ? a : (a - a % b + b); }
int iAlignDown(int a, int b) { return a - a % b; }
template<size_t BX, size_t BY>
void CudaMatrix<BX, BY>::allocate(const int M_, const int N_, bool host, float * devmem, float * hostmem)
{
M = M_;
N = N_;
padM = iAlignUp(M, BY);
padN = iAlignUp(N, BX);
h_data = hostmem;
d_data = devmem;
if(d_data == nullptr) {
long int nbts = sizeof(float) * (long)padM * padN;
if(nbts < 0) {
fprintf(stderr, "ERROR: cannot allocate %ld bytes from device global memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
d_data = nullptr;
exit(EXIT_FAILURE);
}
safeCall(hipMalloc((void**)&d_data, nbts));
safeCall(hipMemset(d_data, 0, nbts));
if(d_data == nullptr) {
fprintf(stderr, "ERROR: cannot allocate %ld bytes from device global memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
}
d_internalAlloc = true;
}
if(host && h_data == nullptr) {
long int nbts = sizeof(float) * (long)M * N;
if(nbts < 0) {
fprintf(stderr, "ERROR: cannot allocate %ld bytes from host memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
h_data = nullptr;
exit(EXIT_FAILURE);
}
h_data = (float*)malloc(nbts);
memset(h_data, 0, nbts);
h_internalAlloc = true;
}
}
template<size_t BX, size_t BY>
CudaMatrix<BX, BY>::CudaMatrix() : M(0), N(0), h_data(nullptr), d_data(nullptr), h_internalAlloc(false), d_internalAlloc(false)
{
}
template<size_t BX, size_t BY>
CudaMatrix<BX, BY>::~CudaMatrix()
{
if(h_internalAlloc && h_data != nullptr) free(h_data);
h_data = nullptr;
if(d_internalAlloc && d_data != nullptr) safeCall(hipFree(d_data));
d_data = nullptr;
}
template<size_t BX, size_t BY>
double CudaMatrix<BX, BY>::download()
{
TimerGPU timer(0);
int p = sizeof(float) * padN;
if(h_data != nullptr && d_data != nullptr) {
safeCall(hipMemcpy2D(d_data, p, h_data, sizeof(float) * N, sizeof(float) * N, M, hipMemcpyHostToDevice));
}
double gpuTime = timer.read();
#ifdef VERBOSITY
fprintf(stdout, "INFO: download time = %.2fms\n", gpuTime);
fflush(stdout);
#endif
return gpuTime;
}
template<size_t BX, size_t BY>
double CudaMatrix<BX, BY>::readback()
{
TimerGPU timer(0);
int p = sizeof(float) * padN;
// cout << sizeof(float) * N << "\t" << p << "\n";
// if(h_data == nullptr) cout << "1\n";
// if(d_data == nullptr) cout << "2\n";
safeCall(hipMemcpy2D(h_data, sizeof(float) * N, d_data, p, sizeof(float) * N, M, hipMemcpyDeviceToHost));
double gpuTime = timer.read();
#ifdef VERBOSITY
fprintf(stdout, "INFO: readback time = %.2fms\n", gpuTime);
fflush(stdout);
#endif
return gpuTime;
}
// cache A and cache B
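// Tiles of A (BM x BK) and B (BK x BN) are staged in shared memory; each thread accumulates a (BM/TY) x (BN/TX) block of C in registers.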
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_AB(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
// float A_reg[BK];
// float B_reg[BK];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
#pragma unroll
for(int kk = 0; kk < BK; kk++) {
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
// C_reg[ii][ij] += A_reg[kk] * B_smem[kk][in];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache A and cache B and prefetching
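// Same tiling as above, but the next A/B tiles are prefetched into registers while the current tiles are multiplied, then copied into shared memory.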
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_AB_prefetching(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
float A_reg[BM / TY][BK / TX];
float B_reg[BK / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
if(ik < lda - 1) {
// load block of A to registers
const float * daptr_ = daptr + tidy * lda + BK;
for(int ii = tidy, _ii = 0; ii < BM; ii += TY, _ii++, daptr_ += TY * lda) {
for(int ij = tidx, _ij = 0; ij < BK; ij += TX, _ij++) {
A_reg[_ii][_ij] = daptr_[ij];
}
}
// load block of B to registers
const float * dbptr_ = dbptr + tidy * ldb + stride_b;
for(int ii = tidy, _ii = 0; ii < BK; ii += TY, _ii++, dbptr_ += TY * ldb) {
for(int ij = tidx, _ij = 0; ij < BN; ij += TX, _ij++) {
B_reg[_ii][_ij] = dbptr_[ij];
}
}
}
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
#pragma unroll
for(int kk = 0; kk < BK; kk++) {
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
// C_reg[ii][ij] += A_reg[kk] * B_smem[kk][in];
}
}
}
if(ik < lda - 1) {
// store registers to A_smem
for(int ii = tidy, _ii = 0; ii < BM; ii += TY, _ii++) {
for(int ij = tidx, _ij = 0; ij < BK; ij += TX, _ij++) {
A_smem[ii][ij] = A_reg[_ii][_ij];
}
}
// store registers to B_smem
for(int ii = tidy, _ii = 0; ii < BK; ii += TY, _ii++) {
for(int ij = tidx, _ij = 0; ij < BN; ij += TX, _ij++) {
B_smem[ii][ij] = B_reg[_ii][_ij];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache A and cache B and double-buffering
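// Each BK-wide tile is processed in two halves so that loading one half can overlap with computation on the other.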
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_AB_double_buffering(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
const int HALF_BK = BK / 2;
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM ; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < HALF_BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < HALF_BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
for(int ik = 0; ik < lda; ik += BK) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = HALF_BK + tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + (HALF_BK + tidy) * ldb;
for(int ii = HALF_BK + tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
for(int kk = 0; kk < HALF_BK; kk++) {
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
}
}
}
__syncthreads();
daptr += BK, dbptr += stride_b;
if(ik < lda - 1) {
// load block of A to shared memory
daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < HALF_BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < HALF_BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
}
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
for(int kk = HALF_BK; kk < BK; kk++) {
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache B
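// Only the B tile is kept in shared memory; each thread streams its rows of A through a BK-element register array.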
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_B(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[BK][BN];
// __shared__ float C_smem[BM][BN];
float C_reg[BM / TY * BN / TX];
float A_reg[BK];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY * BN / TX; ii++) {
C_reg[ii] = 0.f;
}
// for(int im = tidy; im < BM; im += TY) {
// for(int in = tidx; in < BN; in += TX) {
// C_smem[im][in] = 0.f;
// }
// }
// __syncthreads();
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of B to shared memory
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
const float * daptr_ = daptr + tidy * lda;
int ii = 0;
for(int im = tidy; im < BM; im += TY, daptr_ += TY * lda) {
for(int kk = 0; kk < BK; kk++) {
A_reg[kk] = daptr_[kk];
}
for(int in = tidx; in < BN; in += TX) {
float ret = 0.f;
#pragma unroll
for(int kk = 0; kk < BK; kk++) {
ret += A_reg[kk] * B_smem[kk][in];
// dcptr_[in] += daptr_[kk] * B_smem[kk][in];
// C_smem[im][in] += A_reg[kk] * B_smem[kk][in];
}
// C_smem[im][in] += ret;
C_reg[ii++] += ret;
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
int ii = 0;
for(int im = tidy; im < BM; im += TY, dcptr_ += TY * ldc) {
for(int in = tidx; in < BN; in += TX) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii++];
// dcptr_[in] = beta * dcptr_[in] + alpha * C_smem[im][in];
}
}
}
// cache B and double buffering
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_B_double_buffering(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
// float A_reg[BK];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
const int HALF_BK = BK / 2;
// load block of B to shared memory
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < HALF_BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
for(int ik = 0; ik < lda; ik += BK) {
// load block of B to shared memory
const float * dbptr_ = dbptr + (HALF_BK + tidy) * ldb;
for(int ii = HALF_BK + tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
const float * daptr_ = daptr + tidy * lda;
for(int im = tidy, ii = 0; im < BM; im += TY, ii++, daptr_ += TY * lda) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
for(int kk = 0; kk < HALF_BK; kk++) {
C_reg[ii][ij] += daptr_[kk] * B_smem[kk][in];
}
}
}
__syncthreads();
daptr += BK, dbptr += stride_b;
if(ik < lda - 1) {
// load block of B to shared memory
dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < HALF_BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
}
daptr_ = daptr + tidy * lda - BK;
for(int im = tidy, ii = 0; im < BM; im += TY, ii++, daptr_ += TY * lda) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
for(int kk = HALF_BK; kk < BK; kk++) {
C_reg[ii][ij] += daptr_[kk] * B_smem[kk][in];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// unrolling
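// Fully unrolled variants with fixed 64x16x128 tiles and a 16x16 thread block; each thread accumulates 32 elements of C.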
#define Bm 64
#define Bk 16
#define Bn 128
#define Tx 16
#define Ty 16
__device__ __inline__ float s_dot16(const float * a, float * bs)
{
float ret = 0.f;
ret += a[ 0] * bs[0 * 128];
ret += a[ 1] * bs[1 * 128];
ret += a[ 2] * bs[2 * 128];
ret += a[ 3] * bs[3 * 128];
ret += a[ 4] * bs[4 * 128];
ret += a[ 5] * bs[5 * 128];
ret += a[ 6] * bs[6 * 128];
ret += a[ 7] * bs[7 * 128];
ret += a[ 8] * bs[8 * 128];
ret += a[ 9] * bs[9 * 128];
ret += a[10] * bs[10 * 128];
ret += a[11] * bs[11 * 128];
ret += a[12] * bs[12 * 128];
ret += a[13] * bs[13 * 128];
ret += a[14] * bs[14 * 128];
ret += a[15] * bs[15 * 128];
return ret;
}
__device__ __inline__ void s_dot8(float * c, float a, float * bs)
{
c[0] += a * bs[0 * 16];
c[1] += a * bs[1 * 16];
c[2] += a * bs[2 * 16];
c[3] += a * bs[3 * 16];
c[4] += a * bs[4 * 16];
c[5] += a * bs[5 * 16];
c[6] += a * bs[6 * 16];
c[7] += a * bs[7 * 16];
}
__global__ void mysgemm_cache_B_unrolling(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[2048];
float C_reg[32] = {0.f};
// float A_reg[16] = {0.f};
__shared__ float A_smem[16];
// const unsigned int gy = (blockIdx.y << 6);
// const unsigned int gx = (blockIdx.x << 7);
// const float * daptr = dA + gy * lda;
// const float * dbptr = dB + gx;
const float * daptr = dA + ((blockIdx.y<<6) + threadIdx.y) * lda;
const float * dbptr = dB + (blockIdx.x<<7) + threadIdx.y * ldb + threadIdx.x;
float * dcptr = dC + ((blockIdx.y<<6) + threadIdx.y) * ldc + (blockIdx.x<<7) + threadIdx.x;
for(int ik = 0; ik < lda; ik += 16) {
float * Bs = &B_smem[(threadIdx.y<<7) + threadIdx.x];
Bs[0 * 16] = dbptr[0 * 16];
Bs[1 * 16] = dbptr[1 * 16];
Bs[2 * 16] = dbptr[2 * 16];
Bs[3 * 16] = dbptr[3 * 16];
Bs[4 * 16] = dbptr[4 * 16];
Bs[5 * 16] = dbptr[5 * 16];
Bs[6 * 16] = dbptr[6 * 16];
Bs[7 * 16] = dbptr[7 * 16];
__syncthreads();
const float * daptr_ = daptr;
float * C_reg_ = C_reg;
#pragma unroll
for(int im = 0; im < 64; im += 16) {
if(threadIdx.y == 0) A_smem[threadIdx.x] = daptr_[threadIdx.x];
// __syncthreads();
// A_reg[ 0] = daptr_[ 0];
// A_reg[ 1] = daptr_[ 1];
// A_reg[ 2] = daptr_[ 2];
// A_reg[ 3] = daptr_[ 3];
// A_reg[ 4] = daptr_[ 4];
// A_reg[ 5] = daptr_[ 5];
// A_reg[ 6] = daptr_[ 6];
// A_reg[ 7] = daptr_[ 7];
// A_reg[ 8] = daptr_[ 8];
// A_reg[ 9] = daptr_[ 9];
// A_reg[10] = daptr_[10];
// A_reg[11] = daptr_[11];
// A_reg[12] = daptr_[12];
// A_reg[13] = daptr_[13];
// A_reg[14] = daptr_[14];
// A_reg[15] = daptr_[15];
//
Bs = &B_smem[threadIdx.x];
s_dot8(C_reg_, A_smem[0], &Bs[0 * 128]);
s_dot8(C_reg_, A_smem[1], &Bs[1 * 128]);
s_dot8(C_reg_, A_smem[2], &Bs[2 * 128]);
s_dot8(C_reg_, A_smem[3], &Bs[3 * 128]);
s_dot8(C_reg_, A_smem[4], &Bs[4 * 128]);
s_dot8(C_reg_, A_smem[5], &Bs[5 * 128]);
s_dot8(C_reg_, A_smem[6], &Bs[6 * 128]);
s_dot8(C_reg_, A_smem[7], &Bs[7 * 128]);
s_dot8(C_reg_, A_smem[8], &Bs[8 * 128]);
s_dot8(C_reg_, A_smem[9], &Bs[9 * 128]);
s_dot8(C_reg_, A_smem[10], &Bs[10 * 128]);
s_dot8(C_reg_, A_smem[11], &Bs[11 * 128]);
s_dot8(C_reg_, A_smem[12], &Bs[12 * 128]);
s_dot8(C_reg_, A_smem[13], &Bs[13 * 128]);
s_dot8(C_reg_, A_smem[14], &Bs[14 * 128]);
s_dot8(C_reg_, A_smem[15], &Bs[15 * 128]);
C_reg_ += 8;
daptr_ += (lda<<4);
}
__syncthreads();
daptr += 16;
dbptr += (ldb<<4);
}
float * C_reg_ = C_reg;
#pragma unroll
for(int im = 0; im < 64; im += 16) {
dcptr[0 * 16] = beta * dcptr[0 * 16] + alpha * C_reg_[0];
dcptr[1 * 16] = beta * dcptr[1 * 16] + alpha * C_reg_[1];
dcptr[2 * 16] = beta * dcptr[2 * 16] + alpha * C_reg_[2];
dcptr[3 * 16] = beta * dcptr[3 * 16] + alpha * C_reg_[3];
dcptr[4 * 16] = beta * dcptr[4 * 16] + alpha * C_reg_[4];
dcptr[5 * 16] = beta * dcptr[5 * 16] + alpha * C_reg_[5];
dcptr[6 * 16] = beta * dcptr[6 * 16] + alpha * C_reg_[6];
dcptr[7 * 16] = beta * dcptr[7 * 16] + alpha * C_reg_[7];
dcptr += (ldc<<4);
C_reg_ += 8;
}
}
__global__ void mysgemm_cache_B_unrolling_double_buffering(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[2048];
float C_reg[32] = {0.f};
float A_reg[16] = {0.f};
const unsigned int gy = (blockIdx.y << 6);
const unsigned int gx = (blockIdx.x << 7);
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
for(int ik = 0; ik < lda; ik += Bk) {
const float * dbptr_ = dbptr + threadIdx.y * ldb + threadIdx.x;
float * Bs = &B_smem[(threadIdx.y<<7) + threadIdx.x];
Bs[0 * 16] = dbptr_[0 * 16];
Bs[1 * 16] = dbptr_[1 * 16];
Bs[2 * 16] = dbptr_[2 * 16];
Bs[3 * 16] = dbptr_[3 * 16];
Bs[4 * 16] = dbptr_[4 * 16];
Bs[5 * 16] = dbptr_[5 * 16];
Bs[6 * 16] = dbptr_[6 * 16];
Bs[7 * 16] = dbptr_[7 * 16];
__syncthreads();
const float * daptr_ = daptr + threadIdx.y * lda;
float * C_reg_ = C_reg;
#pragma unroll
for(int im = 0; im < 64; im += 16) {
A_reg[ 0] = daptr_[ 0];
A_reg[ 1] = daptr_[ 1];
A_reg[ 2] = daptr_[ 2];
A_reg[ 3] = daptr_[ 3];
A_reg[ 4] = daptr_[ 4];
A_reg[ 5] = daptr_[ 5];
A_reg[ 6] = daptr_[ 6];
A_reg[ 7] = daptr_[ 7];
A_reg[ 8] = daptr_[ 8];
A_reg[ 9] = daptr_[ 9];
A_reg[10] = daptr_[10];
A_reg[11] = daptr_[11];
A_reg[12] = daptr_[12];
A_reg[13] = daptr_[13];
A_reg[14] = daptr_[14];
A_reg[15] = daptr_[15];
Bs = &B_smem[threadIdx.x];
C_reg_[0] += s_dot16(A_reg, &Bs[0 * 16]);
C_reg_[1] += s_dot16(A_reg, &Bs[1 * 16]);
C_reg_[2] += s_dot16(A_reg, &Bs[2 * 16]);
C_reg_[3] += s_dot16(A_reg, &Bs[3 * 16]);
C_reg_[4] += s_dot16(A_reg, &Bs[4 * 16]);
C_reg_[5] += s_dot16(A_reg, &Bs[5 * 16]);
C_reg_[6] += s_dot16(A_reg, &Bs[6 * 16]);
C_reg_[7] += s_dot16(A_reg, &Bs[7 * 16]);
C_reg_ += 8;
daptr_ += (lda<<4);
}
__syncthreads();
daptr += Bk;
dbptr += Bk * ldb;
}
float * dcptr_ = dcptr + threadIdx.y * ldc + threadIdx.x;
float * C_reg_ = C_reg;
#pragma unroll
for(int im = 0; im < 64; im += 16) {
dcptr_[0 * 16] = beta * dcptr_[0 * 16] + alpha * C_reg_[0];
dcptr_[1 * 16] = beta * dcptr_[1 * 16] + alpha * C_reg_[1];
dcptr_[2 * 16] = beta * dcptr_[2 * 16] + alpha * C_reg_[2];
dcptr_[3 * 16] = beta * dcptr_[3 * 16] + alpha * C_reg_[3];
dcptr_[4 * 16] = beta * dcptr_[4 * 16] + alpha * C_reg_[4];
dcptr_[5 * 16] = beta * dcptr_[5 * 16] + alpha * C_reg_[5];
dcptr_[6 * 16] = beta * dcptr_[6 * 16] + alpha * C_reg_[6];
dcptr_[7 * 16] = beta * dcptr_[7 * 16] + alpha * C_reg_[7];
dcptr_ += (ldc<<4);
C_reg_ += 8;
}
}
// cache A
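// Only the A tile is kept in shared memory; B is read directly from global memory in the inner-product loop.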
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_A(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
// __shared__ float B_smem[BK][BN];
// __shared__ float B_smem[BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
__syncthreads();
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
const float * dbptr_ = dbptr;
for(int kk = 0; kk < BK; kk++, dbptr_ += ldb) {
// C_reg[ii][ij] += A_smem[im][kk] * B_smem[in];
C_reg[ii][ij] += A_smem[im][kk] * dbptr_[in];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
void mygemm_wrapper(const int M, const int K, const int N, const float alpha, const float * A, const int lda, const float * B, const int ldb, const float beta, float * C, const int ldc)
{
CudaMatrix<BK, BM> wrapperA;
wrapperA.allocate(M, lda, false, nullptr, const_cast<float*>(A));
wrapperA.download();
CudaMatrix<BN, BK> wrapperB;
wrapperB.allocate(K, ldb, false, nullptr, const_cast<float*>(B));
wrapperB.download();
CudaMatrix<BN, BM> wrapperC;
wrapperC.allocate(M, ldc, false, nullptr, C);
wrapperC.download();
#ifdef VERBOSITY
fprintf(stdout, "INFO: matrix A, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperA.padM, wrapperA.padN);
fprintf(stdout, "INFO: matrix B, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperB.padM, wrapperB.padN);
fprintf(stdout, "INFO: matrix C, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperC.padM, wrapperC.padN);
#endif
dim3 grid( wrapperC.padN / BN, wrapperA.padM / BM, 1 );
dim3 threads( TX, TY, 1 );
TimerGPU timer(0);
hipLaunchKernelGGL(( mysgemm_cache_B<BM, BK, BN, TX, TY>), dim3(grid), dim3(threads), 0, 0, alpha, wrapperA.d_data, wrapperA.padN, wrapperB.d_data, wrapperB.padN, beta, wrapperC.d_data, wrapperC.padN);
double gpuTime = timer.read();
// wrapperA.readback();
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < N; j++) {
// fprintf(stdout, "%02.2f\t", A[i * N + j]);
// }
// fprintf(stdout, "\n");
// }
// fflush(stdout);
fprintf(stdout, "INFO: matrix multiply time = %.2f ms.\n", gpuTime);
#ifdef VERBOSITY
fprintf(stdout, "INFO: performance = %f GFLOPS\n", (2.0 * M * N * K) / (gpuTime / 1000.0 * 1e9));
#endif
fflush(stdout);
wrapperC.readback();
}
void constantInit(float * data, long int size, float val)
{
for(long int i = 0; i < size; i++) {
data[i] = val;
}
}
int main(int argc, char * argv[])
{
if(argc != 4) {
fprintf(stderr, "USAGE: M K N\n");
return -1;
}
int M = atoi(argv[1]);
int K = atoi(argv[2]);
int N = atoi(argv[3]);
#ifdef VERBOSITY
fprintf(stdout, "INFO: matrix A (MxK) multiply matrix B (KxN), result matrix C (MxN).\n");
fprintf(stdout, "INFO: M = %d, K = %d, N = %d\n", M, K, N);
fflush(stdout);
#endif
float * h_A = (float*)malloc(sizeof(float) * M * K);
float * h_B = (float*)malloc(sizeof(float) * K * N);
float * h_C = (float*)malloc(sizeof(float) * M * N);
float * h_D = (float*)malloc(sizeof(float) * M * N);
const float valB = 0.01f;
long int size_A = M * K;
long int size_B = K * N;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
long int size_C = M * N;
long int size_D = size_C;
memset(h_C, 0, sizeof(float) * size_C);
memset(h_D, 0, sizeof(float) * size_D);
// warm up
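// ROW_BLOCK_A, ROW_BLOCK_B, COL_BLOCK_C, THREAD_BLOCK_X and THREAD_BLOCK_Y are not defined in this file; presumably they are supplied as compile-time definitions (e.g. -D flags).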
mygemm_wrapper<ROW_BLOCK_A, ROW_BLOCK_B, COL_BLOCK_C, THREAD_BLOCK_X, THREAD_BLOCK_Y>(
M, K, N, 1.f,
h_A, K, h_B, N, 0.f, h_C, N);
// mygemm_wrapper<128, 32, 64, 32, 8>(
// M, K, N, 1.f,
// h_A, K, h_B, N, 0.f, h_C, N);
// double t0 = omp_get_wtime();
TimerCPU timer(2.60 * 1000);
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, 1.0f, h_A, K, h_B, N, 0.0f, h_D, N);
double cpuTime = timer.read();
// t0 = omp_get_wtime() - t0;
// cout << t0 << "\n";
#ifdef VERBOSITY
fprintf(stdout, "INFO: matrix multiply time = %.2f ms.\n", cpuTime);
fprintf(stdout, "INFO: performance = %f GFLOPS\n", (2.0 * M * N * K) / (cpuTime / 1000.0 * 1e9));
#endif
fflush(stdout);
// test relative error
bool correct = true;
double eps = 1.e-6;
// for(long int i = 0; i < size_C; i++) {
// double abs_err = fabs(h_C[i] - h_D[i]);
// double dot_length = K;
// double abs_val = fabs(h_C[i]);
// double rel_err = abs_err / abs_val / dot_length;
//
// if (rel_err > eps) {
//// fprintf(stderr, "ERROR: Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], h_D[i], eps);
// correct = false;
//
// }
// }
fprintf(stdout, "%s\n", correct ? "Result = PASS" : "Result = FAIL");
fflush(stdout);
free(h_A); h_A = nullptr;
free(h_B); h_B = nullptr;
free(h_C); h_C = nullptr;
free(h_D); h_D = nullptr;
if (!correct) {
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
de053397804befba86dea034dc07279809174cef.cu
|
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include <assert.h>
#include <iostream>
#include <cuda_runtime.h>
using std::cout;
#define GNU_C_COMPILER
#if defined(GNU_C_COMPILER)
extern "C" {
#include "cblas.h"
#include "lapacke.h"
#include "lapacke_mangling.h"
}
#elif defined(INTEL_C_COMPILER)
#include "mkl.h"
#endif
//#define VERBOSITY
using std::cout;
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
#define nullptr NULL
#define safeCall(err) __safeCall(err, __FILE__, __LINE__)
inline void __safeCall(cudaError err, const char * file, const int line)
{
if(cudaSuccess != err) {
fprintf(stderr, "ERROR: safeCall() Runtime API error in file <%s>, line %i : %s.\n", file , line, cudaGetErrorString(err));
exit(-1);
}
}
class TimerGPU {
public:
cudaEvent_t start, stop;
cudaStream_t stream;
TimerGPU(cudaStream_t stream_ = 0) : stream(stream_) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, stream);
}
~TimerGPU() {
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
float read() {
cudaEventRecord(stop, stream);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
return time;
}
};
class TimerCPU {
static const int bits = 10;
public:
long long beg_clock;
float freq;
TimerCPU(float freq_) : freq(freq_) {
beg_clock = getTSC(bits);
}
long long getTSC(int bits) {
#ifdef WIN32
return __rdtsc();
#else
unsigned int low, high;
__asm__(".byte 0x0f, 0x31" :"=a" (low), "=d" (high));
return ((long long)high<<(32 - bits)) | ((long long)low >> bits);
#endif
}
float read() {
long long end_clock = getTSC(bits);
long long Kcycles = end_clock - beg_clock;
float time = (float)(1 << bits) * Kcycles / freq / 1e3f;
return time;
}
};
int iDivUp(int a, int b);
int iDivDown(int a, int b);
int iAlignUp(int a, int b);
int iAlignDown(int a, int b);
template<size_t BX, size_t BY>
class CudaMatrix {
public:
CudaMatrix();
~CudaMatrix();
void allocate(const int M_, const int N_, bool host, float * devmem, float * hostmem);
double download();
double readback();
public:
int M, N;
int padM, padN;
float * h_data;
float * d_data;
bool h_internalAlloc;
bool d_internalAlloc;
};
int iDivUp(int a, int b) { return (a % b == 0) ? (a / b) : (a / b + 1); }
int iDivDown(int a, int b) { return a / b; }
int iAlignUp(int a, int b) { return (a % b == 0) ? a : (a - a % b + b); }
int iAlignDown(int a, int b) { return a - a % b; }
template<size_t BX, size_t BY>
void CudaMatrix<BX, BY>::allocate(const int M_, const int N_, bool host, float * devmem, float * hostmem)
{
M = M_;
N = N_;
padM = iAlignUp(M, BY);
padN = iAlignUp(N, BX);
h_data = hostmem;
d_data = devmem;
if(d_data == nullptr) {
long int nbts = sizeof(float) * (long)padM * padN;
if(nbts < 0) {
fprintf(stderr, "ERROR: cannot allocate %ld bytes from device global memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
d_data = nullptr;
exit(EXIT_FAILURE);
}
safeCall(cudaMalloc((void**)&d_data, nbts));
safeCall(cudaMemset(d_data, 0, nbts));
if(d_data == nullptr) {
fprintf(stderr, "ERROR: cannot allocate %ld bytes from device global memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
}
d_internalAlloc = true;
}
if(host && h_data == nullptr) {
long int nbts = sizeof(float) * (long)M * N;
if(nbts < 0) {
fprintf(stderr, "ERROR: cannot allocate %ld bytes from host memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
h_data = nullptr;
exit(EXIT_FAILURE);
}
h_data = (float*)malloc(nbts);
memset(h_data, 0, nbts);
h_internalAlloc = true;
}
}
template<size_t BX, size_t BY>
CudaMatrix<BX, BY>::CudaMatrix() : M(0), N(0), h_data(nullptr), d_data(nullptr), h_internalAlloc(false), d_internalAlloc(false)
{
}
template<size_t BX, size_t BY>
CudaMatrix<BX, BY>::~CudaMatrix()
{
if(h_internalAlloc && h_data != nullptr) free(h_data);
h_data = nullptr;
if(d_internalAlloc && d_data != nullptr) safeCall(cudaFree(d_data));
d_data = nullptr;
}
template<size_t BX, size_t BY>
double CudaMatrix<BX, BY>::download()
{
TimerGPU timer(0);
int p = sizeof(float) * padN;
if(h_data != nullptr && d_data != nullptr) {
safeCall(cudaMemcpy2D(d_data, p, h_data, sizeof(float) * N, sizeof(float) * N, M, cudaMemcpyHostToDevice));
}
double gpuTime = timer.read();
#ifdef VERBOSITY
fprintf(stdout, "INFO: download time = %.2fms\n", gpuTime);
fflush(stdout);
#endif
return gpuTime;
}
template<size_t BX, size_t BY>
double CudaMatrix<BX, BY>::readback()
{
TimerGPU timer(0);
int p = sizeof(float) * padN;
// cout << sizeof(float) * N << "\t" << p << "\n";
// if(h_data == nullptr) cout << "1\n";
// if(d_data == nullptr) cout << "2\n";
safeCall(cudaMemcpy2D(h_data, sizeof(float) * N, d_data, p, sizeof(float) * N, M, cudaMemcpyDeviceToHost));
double gpuTime = timer.read();
#ifdef VERBOSITY
fprintf(stdout, "INFO: readback time = %.2fms\n", gpuTime);
fflush(stdout);
#endif
return gpuTime;
}
// cache A and cache B
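// Tiles of A (BM x BK) and B (BK x BN) are staged in shared memory; each thread accumulates a (BM/TY) x (BN/TX) block of C in registers.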
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_AB(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
// float A_reg[BK];
// float B_reg[BK];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
#pragma unroll
for(int kk = 0; kk < BK; kk++) {
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
// C_reg[ii][ij] += A_reg[kk] * B_smem[kk][in];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache A and cache B and prefetching
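// Same tiling as above, but the next A/B tiles are prefetched into registers while the current tiles are multiplied, then copied into shared memory.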
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_AB_prefetching(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
float A_reg[BM / TY][BK / TX];
float B_reg[BK / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
if(ik < lda - 1) {
// load block of A to registers
const float * daptr_ = daptr + tidy * lda + BK;
for(int ii = tidy, _ii = 0; ii < BM; ii += TY, _ii++, daptr_ += TY * lda) {
for(int ij = tidx, _ij = 0; ij < BK; ij += TX, _ij++) {
A_reg[_ii][_ij] = daptr_[ij];
}
}
// load block of B to registers
const float * dbptr_ = dbptr + tidy * ldb + stride_b;
for(int ii = tidy, _ii = 0; ii < BK; ii += TY, _ii++, dbptr_ += TY * ldb) {
for(int ij = tidx, _ij = 0; ij < BN; ij += TX, _ij++) {
B_reg[_ii][_ij] = dbptr_[ij];
}
}
}
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
#pragma unroll
for(int kk = 0; kk < BK; kk++) {
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
// C_reg[ii][ij] += A_reg[kk] * B_smem[kk][in];
}
}
}
if(ik < lda - 1) {
// store registers to A_smem
for(int ii = tidy, _ii = 0; ii < BM; ii += TY, _ii++) {
for(int ij = tidx, _ij = 0; ij < BK; ij += TX, _ij++) {
A_smem[ii][ij] = A_reg[_ii][_ij];
}
}
// store registers to B_smem
for(int ii = tidy, _ii = 0; ii < BK; ii += TY, _ii++) {
for(int ij = tidx, _ij = 0; ij < BN; ij += TX, _ij++) {
B_smem[ii][ij] = B_reg[_ii][_ij];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache A and cache B and double-buffering
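// Each BK-wide tile is processed in two halves so that loading one half can overlap with computation on the other.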
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_AB_double_buffering(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
const int HALF_BK = BK / 2;
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM ; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < HALF_BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < HALF_BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
for(int ik = 0; ik < lda; ik += BK) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = HALF_BK + tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + (HALF_BK + tidy) * ldb;
for(int ii = HALF_BK + tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
for(int kk = 0; kk < HALF_BK; kk++) {
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
}
}
}
__syncthreads();
daptr += BK, dbptr += stride_b;
if(ik < lda - 1) {
// load block of A to shared memory
daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < HALF_BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < HALF_BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
}
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
for(int kk = HALF_BK; kk < BK; kk++) {
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache B
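// Only the B tile is kept in shared memory; each thread streams its rows of A through a BK-element register array.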
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_B(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[BK][BN];
// __shared__ float C_smem[BM][BN];
float C_reg[BM / TY * BN / TX];
float A_reg[BK];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY * BN / TX; ii++) {
C_reg[ii] = 0.f;
}
// for(int im = tidy; im < BM; im += TY) {
// for(int in = tidx; in < BN; in += TX) {
// C_smem[im][in] = 0.f;
// }
// }
// __syncthreads();
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of B to shared memory
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
const float * daptr_ = daptr + tidy * lda;
int ii = 0;
for(int im = tidy; im < BM; im += TY, daptr_ += TY * lda) {
for(int kk = 0; kk < BK; kk++) {
A_reg[kk] = daptr_[kk];
}
for(int in = tidx; in < BN; in += TX) {
float ret = 0.f;
#pragma unroll
for(int kk = 0; kk < BK; kk++) {
ret += A_reg[kk] * B_smem[kk][in];
// dcptr_[in] += daptr_[kk] * B_smem[kk][in];
// C_smem[im][in] += A_reg[kk] * B_smem[kk][in];
}
// C_smem[im][in] += ret;
C_reg[ii++] += ret;
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
int ii = 0;
for(int im = tidy; im < BM; im += TY, dcptr_ += TY * ldc) {
for(int in = tidx; in < BN; in += TX) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii++];
// dcptr_[in] = beta * dcptr_[in] + alpha * C_smem[im][in];
}
}
}
// cache B and double buffering
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_B_double_buffering(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
// float A_reg[BK];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
const int HALF_BK = BK / 2;
// load block of B to shared memory
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < HALF_BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
for(int ik = 0; ik < lda; ik += BK) {
// load block of B to shared memory
const float * dbptr_ = dbptr + (HALF_BK + tidy) * ldb;
for(int ii = HALF_BK + tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
const float * daptr_ = daptr + tidy * lda;
for(int im = tidy, ii = 0; im < BM; im += TY, ii++, daptr_ += TY * lda) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
for(int kk = 0; kk < HALF_BK; kk++) {
C_reg[ii][ij] += daptr_[kk] * B_smem[kk][in];
}
}
}
__syncthreads();
daptr += BK, dbptr += stride_b;
if(ik < lda - 1) {
// load block of B to shared memory
dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < HALF_BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
}
daptr_ = daptr + tidy * lda - BK;
for(int im = tidy, ii = 0; im < BM; im += TY, ii++, daptr_ += TY * lda) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
for(int kk = HALF_BK; kk < BK; kk++) {
C_reg[ii][ij] += daptr_[kk] * B_smem[kk][in];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// unrolling
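// Fully unrolled variants with fixed 64x16x128 tiles and a 16x16 thread block; each thread accumulates 32 elements of C.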
#define Bm 64
#define Bk 16
#define Bn 128
#define Tx 16
#define Ty 16
__device__ __inline__ float s_dot16(const float * a, float * bs)
{
float ret = 0.f;
ret += a[ 0] * bs[0 * 128];
ret += a[ 1] * bs[1 * 128];
ret += a[ 2] * bs[2 * 128];
ret += a[ 3] * bs[3 * 128];
ret += a[ 4] * bs[4 * 128];
ret += a[ 5] * bs[5 * 128];
ret += a[ 6] * bs[6 * 128];
ret += a[ 7] * bs[7 * 128];
ret += a[ 8] * bs[8 * 128];
ret += a[ 9] * bs[9 * 128];
ret += a[10] * bs[10 * 128];
ret += a[11] * bs[11 * 128];
ret += a[12] * bs[12 * 128];
ret += a[13] * bs[13 * 128];
ret += a[14] * bs[14 * 128];
ret += a[15] * bs[15 * 128];
return ret;
}
__device__ __inline__ void s_dot8(float * c, float a, float * bs)
{
c[0] += a * bs[0 * 16];
c[1] += a * bs[1 * 16];
c[2] += a * bs[2 * 16];
c[3] += a * bs[3 * 16];
c[4] += a * bs[4 * 16];
c[5] += a * bs[5 * 16];
c[6] += a * bs[6 * 16];
c[7] += a * bs[7 * 16];
}
__global__ void mysgemm_cache_B_unrolling(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[2048];
float C_reg[32] = {0.f};
// float A_reg[16] = {0.f};
__shared__ float A_smem[16];
// const unsigned int gy = (blockIdx.y << 6);
// const unsigned int gx = (blockIdx.x << 7);
// const float * daptr = dA + gy * lda;
// const float * dbptr = dB + gx;
const float * daptr = dA + ((blockIdx.y<<6) + threadIdx.y) * lda;
const float * dbptr = dB + (blockIdx.x<<7) + threadIdx.y * ldb + threadIdx.x;
float * dcptr = dC + ((blockIdx.y<<6) + threadIdx.y) * ldc + (blockIdx.x<<7) + threadIdx.x;
for(int ik = 0; ik < lda; ik += 16) {
float * Bs = &B_smem[(threadIdx.y<<7) + threadIdx.x];
Bs[0 * 16] = dbptr[0 * 16];
Bs[1 * 16] = dbptr[1 * 16];
Bs[2 * 16] = dbptr[2 * 16];
Bs[3 * 16] = dbptr[3 * 16];
Bs[4 * 16] = dbptr[4 * 16];
Bs[5 * 16] = dbptr[5 * 16];
Bs[6 * 16] = dbptr[6 * 16];
Bs[7 * 16] = dbptr[7 * 16];
__syncthreads();
const float * daptr_ = daptr;
float * C_reg_ = C_reg;
#pragma unroll
for(int im = 0; im < 64; im += 16) {
if(threadIdx.y == 0) A_smem[threadIdx.x] = daptr_[threadIdx.x];
// __syncthreads();
// A_reg[ 0] = daptr_[ 0];
// A_reg[ 1] = daptr_[ 1];
// A_reg[ 2] = daptr_[ 2];
// A_reg[ 3] = daptr_[ 3];
// A_reg[ 4] = daptr_[ 4];
// A_reg[ 5] = daptr_[ 5];
// A_reg[ 6] = daptr_[ 6];
// A_reg[ 7] = daptr_[ 7];
// A_reg[ 8] = daptr_[ 8];
// A_reg[ 9] = daptr_[ 9];
// A_reg[10] = daptr_[10];
// A_reg[11] = daptr_[11];
// A_reg[12] = daptr_[12];
// A_reg[13] = daptr_[13];
// A_reg[14] = daptr_[14];
// A_reg[15] = daptr_[15];
//
Bs = &B_smem[threadIdx.x];
s_dot8(C_reg_, A_smem[0], &Bs[0 * 128]);
s_dot8(C_reg_, A_smem[1], &Bs[1 * 128]);
s_dot8(C_reg_, A_smem[2], &Bs[2 * 128]);
s_dot8(C_reg_, A_smem[3], &Bs[3 * 128]);
s_dot8(C_reg_, A_smem[4], &Bs[4 * 128]);
s_dot8(C_reg_, A_smem[5], &Bs[5 * 128]);
s_dot8(C_reg_, A_smem[6], &Bs[6 * 128]);
s_dot8(C_reg_, A_smem[7], &Bs[7 * 128]);
s_dot8(C_reg_, A_smem[8], &Bs[8 * 128]);
s_dot8(C_reg_, A_smem[9], &Bs[9 * 128]);
s_dot8(C_reg_, A_smem[10], &Bs[10 * 128]);
s_dot8(C_reg_, A_smem[11], &Bs[11 * 128]);
s_dot8(C_reg_, A_smem[12], &Bs[12 * 128]);
s_dot8(C_reg_, A_smem[13], &Bs[13 * 128]);
s_dot8(C_reg_, A_smem[14], &Bs[14 * 128]);
s_dot8(C_reg_, A_smem[15], &Bs[15 * 128]);
C_reg_ += 8;
daptr_ += (lda<<4);
}
__syncthreads();
daptr += 16;
dbptr += (ldb<<4);
}
float * C_reg_ = C_reg;
#pragma unroll
for(int im = 0; im < 64; im += 16) {
dcptr[0 * 16] = beta * dcptr[0 * 16] + alpha * C_reg_[0];
dcptr[1 * 16] = beta * dcptr[1 * 16] + alpha * C_reg_[1];
dcptr[2 * 16] = beta * dcptr[2 * 16] + alpha * C_reg_[2];
dcptr[3 * 16] = beta * dcptr[3 * 16] + alpha * C_reg_[3];
dcptr[4 * 16] = beta * dcptr[4 * 16] + alpha * C_reg_[4];
dcptr[5 * 16] = beta * dcptr[5 * 16] + alpha * C_reg_[5];
dcptr[6 * 16] = beta * dcptr[6 * 16] + alpha * C_reg_[6];
dcptr[7 * 16] = beta * dcptr[7 * 16] + alpha * C_reg_[7];
dcptr += (ldc<<4);
C_reg_ += 8;
}
}
__global__ void mysgemm_cache_B_unrolling_double_buffering(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[2048];
float C_reg[32] = {0.f};
float A_reg[16] = {0.f};
const unsigned int gy = (blockIdx.y << 6);
const unsigned int gx = (blockIdx.x << 7);
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
for(int ik = 0; ik < lda; ik += Bk) {
const float * dbptr_ = dbptr + threadIdx.y * ldb + threadIdx.x;
float * Bs = &B_smem[(threadIdx.y<<7) + threadIdx.x];
		Bs[0 * 16] = dbptr_[0 * 16];
		Bs[1 * 16] = dbptr_[1 * 16];
		Bs[2 * 16] = dbptr_[2 * 16];
		Bs[3 * 16] = dbptr_[3 * 16];
		Bs[4 * 16] = dbptr_[4 * 16];
		Bs[5 * 16] = dbptr_[5 * 16];
		Bs[6 * 16] = dbptr_[6 * 16];
		Bs[7 * 16] = dbptr_[7 * 16];
__syncthreads();
const float * daptr_ = daptr + threadIdx.y * lda;
float * C_reg_ = C_reg;
#pragma unroll
for(int im = 0; im < 64; im += 16) {
A_reg[ 0] = daptr_[ 0];
A_reg[ 1] = daptr_[ 1];
A_reg[ 2] = daptr_[ 2];
A_reg[ 3] = daptr_[ 3];
A_reg[ 4] = daptr_[ 4];
A_reg[ 5] = daptr_[ 5];
A_reg[ 6] = daptr_[ 6];
A_reg[ 7] = daptr_[ 7];
A_reg[ 8] = daptr_[ 8];
A_reg[ 9] = daptr_[ 9];
A_reg[10] = daptr_[10];
A_reg[11] = daptr_[11];
A_reg[12] = daptr_[12];
A_reg[13] = daptr_[13];
A_reg[14] = daptr_[14];
A_reg[15] = daptr_[15];
Bs = &B_smem[threadIdx.x];
C_reg_[0] += s_dot16(A_reg, &Bs[0 * 16]);
C_reg_[1] += s_dot16(A_reg, &Bs[1 * 16]);
C_reg_[2] += s_dot16(A_reg, &Bs[2 * 16]);
C_reg_[3] += s_dot16(A_reg, &Bs[3 * 16]);
C_reg_[4] += s_dot16(A_reg, &Bs[4 * 16]);
C_reg_[5] += s_dot16(A_reg, &Bs[5 * 16]);
C_reg_[6] += s_dot16(A_reg, &Bs[6 * 16]);
C_reg_[7] += s_dot16(A_reg, &Bs[7 * 16]);
C_reg_ += 8;
daptr_ += (lda<<4);
}
__syncthreads();
}
float * dcptr_ = dcptr + threadIdx.y * ldc + threadIdx.x;
float * C_reg_ = C_reg;
#pragma unroll
for(int im = 0; im < 64; im += 16) {
dcptr_[0 * 16] = beta * dcptr_[0 * 16] + alpha * C_reg_[0];
dcptr_[1 * 16] = beta * dcptr_[1 * 16] + alpha * C_reg_[1];
dcptr_[2 * 16] = beta * dcptr_[2 * 16] + alpha * C_reg_[2];
dcptr_[3 * 16] = beta * dcptr_[3 * 16] + alpha * C_reg_[3];
dcptr_[4 * 16] = beta * dcptr_[4 * 16] + alpha * C_reg_[4];
dcptr_[5 * 16] = beta * dcptr_[5 * 16] + alpha * C_reg_[5];
dcptr_[6 * 16] = beta * dcptr_[6 * 16] + alpha * C_reg_[6];
dcptr_[7 * 16] = beta * dcptr_[7 * 16] + alpha * C_reg_[7];
dcptr_ += (ldc<<4);
C_reg_ += 8;
}
}
// cache A
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_A(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
// __shared__ float B_smem[BK][BN];
// __shared__ float B_smem[BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
__syncthreads();
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
const float * dbptr_ = dbptr;
for(int kk = 0; kk < BK; kk++, dbptr_ += ldb) {
// C_reg[ii][ij] += A_smem[im][kk] * B_smem[in];
C_reg[ii][ij] += A_smem[im][kk] * dbptr_[in];
}
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
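// Illustrative instantiation of mysgemm_cache_A (hypothetical parameters, mirroring the
// grid/threads setup used by mygemm_wrapper below): each block computes a BMxBN tile of C
// with TXxTY threads, staging a BMxBK slice of A in shared memory per K step, e.g.
//   dim3 grid(N / 64, M / 64), threads(16, 16);
//   mysgemm_cache_A<64, 16, 64, 16, 16><<<grid, threads>>>(alpha, dA, K, dB, N, beta, dC, N);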
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
void mygemm_wrapper(const int M, const int K, const int N, const float alpha, const float * A, const int lda, const float * B, const int ldb, const float beta, float * C, const int ldc)
{
CudaMatrix<BK, BM> wrapperA;
wrapperA.allocate(M, lda, false, nullptr, const_cast<float*>(A));
wrapperA.download();
CudaMatrix<BN, BK> wrapperB;
wrapperB.allocate(K, ldb, false, nullptr, const_cast<float*>(B));
wrapperB.download();
CudaMatrix<BN, BM> wrapperC;
wrapperC.allocate(M, ldc, false, nullptr, C);
wrapperC.download();
#ifdef VERBOSITY
fprintf(stdout, "INFO: matrix A, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperA.padM, wrapperA.padN);
fprintf(stdout, "INFO: matrix B, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperB.padM, wrapperB.padN);
fprintf(stdout, "INFO: matrix C, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperC.padM, wrapperC.padN);
#endif
dim3 grid( wrapperC.padN / BN, wrapperA.padM / BM, 1 );
dim3 threads( TX, TY, 1 );
TimerGPU timer(0);
mysgemm_cache_B<BM, BK, BN, TX, TY><<<grid, threads>>>(alpha, wrapperA.d_data, wrapperA.padN, wrapperB.d_data, wrapperB.padN, beta, wrapperC.d_data, wrapperC.padN);
double gpuTime = timer.read();
// wrapperA.readback();
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < N; j++) {
// fprintf(stdout, "%02.2f\t", A[i * N + j]);
// }
// fprintf(stdout, "\n");
// }
// fflush(stdout);
fprintf(stdout, "INFO: matrix multiply time = %.2f ms.\n", gpuTime);
#ifdef VERBOSITY
fprintf(stdout, "INFO: performance = %f GFLOPS\n", (2.0 * M * N * K) / (gpuTime / 1000.0 * 1e9));
#endif
fflush(stdout);
wrapperC.readback();
}
void constantInit(float * data, long int size, float val)
{
for(long int i = 0; i < size; i++) {
data[i] = val;
}
}
int main(int argc, char * argv[])
{
if(argc != 4) {
fprintf(stderr, "USAGE: M K N\n");
return -1;
}
int M = atoi(argv[1]);
int K = atoi(argv[2]);
int N = atoi(argv[3]);
#ifdef VERBOSITY
fprintf(stdout, "INFO: matrix A (MxK) multiply matrix B (KxN), result matrix C (MxN).\n");
fprintf(stdout, "INFO: M = %d, K = %d, N = %d\n", M, K, N);
fflush(stdout);
#endif
float * h_A = (float*)malloc(sizeof(float) * M * K);
float * h_B = (float*)malloc(sizeof(float) * K * N);
float * h_C = (float*)malloc(sizeof(float) * M * N);
float * h_D = (float*)malloc(sizeof(float) * M * N);
const float valB = 0.01f;
long int size_A = M * K;
long int size_B = K * N;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
long int size_C = M * N;
long int size_D = size_C;
memset(h_C, 0, sizeof(float) * size_C);
memset(h_D, 0, sizeof(float) * size_D);
// warm up
mygemm_wrapper<ROW_BLOCK_A, ROW_BLOCK_B, COL_BLOCK_C, THREAD_BLOCK_X, THREAD_BLOCK_Y>(
M, K, N, 1.f,
h_A, K, h_B, N, 0.f, h_C, N);
// mygemm_wrapper<128, 32, 64, 32, 8>(
// M, K, N, 1.f,
// h_A, K, h_B, N, 0.f, h_C, N);
// double t0 = omp_get_wtime();
TimerCPU timer(2.60 * 1000);
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, 1.0f, h_A, K, h_B, N, 0.0f, h_D, N);
double cpuTime = timer.read();
// t0 = omp_get_wtime() - t0;
// cout << t0 << "\n";
#ifdef VERBOSITY
fprintf(stdout, "INFO: matrix multiply time = %.2f ms.\n", cpuTime);
fprintf(stdout, "INFO: performance = %f GFLOPS\n", (2.0 * M * N * K) / (cpuTime / 1000.0 * 1e9));
#endif
fflush(stdout);
// test relative error
bool correct = true;
double eps = 1.e-6;
// for(long int i = 0; i < size_C; i++) {
// double abs_err = fabs(h_C[i] - h_D[i]);
// double dot_length = K;
// double abs_val = fabs(h_C[i]);
// double rel_err = abs_err / abs_val / dot_length;
//
// if (rel_err > eps) {
//// fprintf(stderr, "ERROR: Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], h_D[i], eps);
// correct = false;
//
// }
// }
fprintf(stdout, "%s\n", correct ? "Result = PASS" : "Result = FAIL");
fflush(stdout);
free(h_A); h_A = nullptr;
free(h_B); h_B = nullptr;
free(h_C); h_C = nullptr;
free(h_D); h_D = nullptr;
if (!correct) {
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
a5da8913f7f46770b6bed996642182a20746ef9c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include<iostream>
#include "cpu_anim.h"
using namespace std;
#include <device_launch_parameters.h>
//Window dim
#define DIM 512
//Create struct to hold device and host pointers to image bitmap
struct DataBlock {
unsigned char *dev_bitmap;
CPUAnimBitmap *bitmap;
};
void cleanup(DataBlock *d)
{
hipFree(d->dev_bitmap);
}
//Kernel is launched once per frame; each thread is mapped to one pixel of the animated image
__global__ void kernel(unsigned char *ptr, int ticks)
{
//Index one of the threads to an image pos
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy);
	//Radially decaying cosine ripple: the grey value depends on distance from the centre, and ticks animates the phase
unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
//Offset into output buffer for window generation when ready
ptr[offset * 4 + 0] = grey;
ptr[offset * 4 + 1] = grey;
ptr[offset * 4 + 2] = grey;
ptr[offset * 4 + 3] = 255;
}
//Generate each frame the window requests: launch one thread per pixel, copy the output pixels back to the host, then repeat for the next frame
void generate_frame(DataBlock *d, int render)
{
	//Create a 2D grid of 16x16-thread blocks, one thread per pixel
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads) , 0, 0, d->dev_bitmap, render);
	//Copy the rendered frame from the device bitmap back to the host bitmap for display
hipMemcpy(d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), hipMemcpyDeviceToHost);
}
int main(void)
{
DataBlock data;
	//Construct with a reference to data; CPUAnimBitmap stores a void pointer, so it can be associated with any data type
CPUAnimBitmap bitmap(DIM, DIM, &data);
//Set up DataBlock ptr for host
data.bitmap = &bitmap;
hipMalloc((void**)&data.dev_bitmap, bitmap.image_size());
	//The casts below are function-pointer types: pass the address of each callback so the system can call it when the event occurs
	//For rendering we want to continuously create and destroy frames to optimise GPU space and to update the image
	//In each cast, the leading type is the return type and the parenthesised list gives the parameter types
bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
}
|
a5da8913f7f46770b6bed996642182a20746ef9c.cu
|
#include <cuda_runtime.h>
#include<iostream>
#include "cpu_anim.h"
using namespace std;
#include <device_launch_parameters.h>
//Window dim
#define DIM 512
//Create struct to hold device and host pointers to image bitmap
struct DataBlock {
unsigned char *dev_bitmap;
CPUAnimBitmap *bitmap;
};
void cleanup(DataBlock *d)
{
cudaFree(d->dev_bitmap);
}
//Kernel is launched once per frame; each thread is mapped to one pixel of the animated image
__global__ void kernel(unsigned char *ptr, int ticks)
{
//Index one of the threads to an image pos
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy);
	//Radially decaying cosine ripple: the grey value depends on distance from the centre, and ticks animates the phase
unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
//Offset into output buffer for window generation when ready
ptr[offset * 4 + 0] = grey;
ptr[offset * 4 + 1] = grey;
ptr[offset * 4 + 2] = grey;
ptr[offset * 4 + 3] = 255;
}
//Generate each frame the window requests: launch one thread per pixel, copy the output pixels back to the host, then repeat for the next frame
void generate_frame(DataBlock *d, int render)
{
	//Create a 2D grid of 16x16-thread blocks, one thread per pixel
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
kernel<<<blocks, threads >>>(d->dev_bitmap, render);
	//Copy the rendered frame from the device bitmap back to the host bitmap for display
cudaMemcpy(d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), cudaMemcpyDeviceToHost);
}
int main(void)
{
DataBlock data;
	//Construct with a reference to data; CPUAnimBitmap stores a void pointer, so it can be associated with any data type
CPUAnimBitmap bitmap(DIM, DIM, &data);
//Set up DataBlock ptr for host
data.bitmap = &bitmap;
cudaMalloc((void**)&data.dev_bitmap, bitmap.image_size());
//Notation below for function ptrs, pass address of each one for system to call back func when event occurs
//For rendering want to continuously createa and destroy frames to optimise GPU space and to upate image
//First arg is return type, second is input params
bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
}
|
9ee1443d7bacf42cc9a510124de86d639bea7889.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cudaflow.hpp>
#define L2(x1, y1, x2, y2) ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
// Each point (thread) computes its distance to each centroid
// and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(
const float* px,
const float* py,
int N,
const float* mx,
const float* my,
float* sx,
float* sy,
int k,
int* c
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= N) {
return;
}
// Make global loads once.
const float x = px[index];
const float y = py[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance = L2(x, y, mx[cluster], my[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
atomicAdd(&sx[best_cluster], x);
atomicAdd(&sy[best_cluster], y);
atomicAdd(&c [best_cluster], 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(
float* mx, float* my, const float* sx, const float* sy, const int* c
) {
const int cluster = threadIdx.x;
const int count = max(1, c[cluster]); // turn 0/0 to 0/1
mx[cluster] = sx[cluster] / count;
my[cluster] = sy[cluster] / count;
}
// k-means clustering
void kmeans(int N, int K, int M, size_t num_cpus, size_t num_gpus) {
std::vector<float> h_px, h_py, h_mx, h_my, mx, my;
std::vector<int> c(K), best_ks(N);
std::vector<float> sx(K), sy(K);
  float *d_px, *d_py, *d_mx, *d_my, *d_sx, *d_sy;
  int *d_c;
// Randomly generate N points
for(int i=0; i<N; ++i) {
h_px.push_back(rand()%1000 - 500);
h_py.push_back(rand()%1000 - 500);
if(i < K) {
mx.push_back(h_px.back());
my.push_back(h_py.back());
h_mx.push_back(h_px.back());
h_my.push_back(h_py.back());
}
}
tf::Taskflow taskflow;
tf::Executor executor(num_cpus + num_gpus);
// cpu version
auto init = taskflow.emplace([&](){
for(int i=0; i<K; ++i) {
mx[i] = h_px[i];
my[i] = h_py[i];
}
}).name("init");
// clear the storage
auto clean_up = taskflow.emplace([&](){
for(int k=0; k<K; ++k) {
sx[k] = 0.0f;
sy[k] = 0.0f;
c [k] = 0;
}
}).name("clean_up");
tf::Task pf;
// update cluster
pf = taskflow.for_each_index(0, N, 1, [&](int i){
float x = h_px[i];
float y = h_py[i];
float best_d = std::numeric_limits<float>::max();
int best_k = 0;
for (int k = 0; k < K; ++k) {
const float d = L2(x, y, mx[k], my[k]);
if (d < best_d) {
best_d = d;
best_k = k;
}
}
best_ks[i] = best_k;
});
auto update_cluster = taskflow.emplace([&](){
for(int i=0; i<N; i++) {
sx[best_ks[i]] += h_px[i];
sy[best_ks[i]] += h_py[i];
c [best_ks[i]] += 1;
}
for(int k=0; k<K; ++k) {
auto count = max(1, c[k]); // turn 0/0 to 0/1
mx[k] = sx[k] / count;
my[k] = sy[k] / count;
}
}).name("update_cluster");
auto condition = taskflow.emplace([m=0, M]() mutable {
return (m++ < M) ? 0 : 1;
}).name("converged?");
init.precede(clean_up);
clean_up.precede(pf);
pf.precede(update_cluster);
condition.precede(clean_up)
.succeed(update_cluster);
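  // Control flow note (assuming Taskflow condition-task semantics, where the returned index
  // selects which successor runs next): while m < M the condition returns 0 and re-runs
  // clean_up, so clean_up -> pf -> update_cluster iterates M times; returning 1 selects a
  // successor that does not exist, which ends the CPU pass.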
// gpu version
auto allocate_px = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_px, N*sizeof(float)) == hipSuccess);
}).name("allocate_px");
auto allocate_py = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_py, N*sizeof(float)) == hipSuccess);
}).name("allocate_py");
auto allocate_mx = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_mx, K*sizeof(float)) == hipSuccess);
}).name("allocate_mx");
auto allocate_my = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_my, K*sizeof(float)) == hipSuccess);
}).name("allocate_my");
auto allocate_sx = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_sx, K*sizeof(float)) == hipSuccess);
}).name("allocate_sx");
auto allocate_sy = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_sy, K*sizeof(float)) == hipSuccess);
}).name("allocate_sy");
auto allocate_c = taskflow.emplace([&](){
    REQUIRE(hipMalloc(&d_c, K*sizeof(int)) == hipSuccess);
}).name("allocate_c");
auto h2d = taskflow.emplace([&](tf::cudaFlow& cf){
cf.copy(d_px, h_px.data(), N).name("h2d_px");
cf.copy(d_py, h_py.data(), N).name("h2d_py");
cf.copy(d_mx, h_mx.data(), K).name("h2d_mx");
cf.copy(d_my, h_my.data(), K).name("h2d_my");
}).name("h2d");
auto kmeans = taskflow.emplace([&](tf::cudaFlow& cf){
auto zero_c = cf.zero(d_c, K).name("zero_c");
auto zero_sx = cf.zero(d_sx, K).name("zero_sx");
auto zero_sy = cf.zero(d_sy, K).name("zero_sy");
auto cluster = cf.kernel(
(N+1024-1) / 1024, 1024, 0,
assign_clusters, d_px, d_py, N, d_mx, d_my, d_sx, d_sy, K, d_c
).name("cluster");
auto new_centroid = cf.kernel(
1, K, 0,
compute_new_means, d_mx, d_my, d_sx, d_sy, d_c
).name("new_centroid");
cluster.precede(new_centroid)
.succeed(zero_c, zero_sx, zero_sy);
}).name("update_means");
auto gpu_condition = taskflow.emplace([i=0, M] () mutable {
return i++ < M ? 0 : 1;
}).name("converged?");
auto stop = taskflow.emplace([&](tf::cudaFlow& cf){
cf.copy(h_mx.data(), d_mx, K).name("d2h_mx");
cf.copy(h_my.data(), d_my, K).name("d2h_my");
}).name("stop");
auto free = taskflow.emplace([&](){
REQUIRE(hipFree(d_px)==hipSuccess);
REQUIRE(hipFree(d_py)==hipSuccess);
REQUIRE(hipFree(d_mx)==hipSuccess);
REQUIRE(hipFree(d_my)==hipSuccess);
REQUIRE(hipFree(d_sx)==hipSuccess);
REQUIRE(hipFree(d_sy)==hipSuccess);
REQUIRE(hipFree(d_c )==hipSuccess);
}).name("free");
// build up the dependency
h2d.succeed(allocate_px, allocate_py, allocate_mx, allocate_my);
kmeans.succeed(allocate_sx, allocate_sy, allocate_c, h2d)
.precede(gpu_condition);
gpu_condition.precede(kmeans, stop);
stop.precede(free);
executor.run(taskflow).wait();
//taskflow.dump(std::cout);
for(int k=0; k<K; k++) {
REQUIRE(::fabs(h_mx[k] - mx[k]) < 1.0f);
REQUIRE(::fabs(h_my[k] - my[k]) < 1.0f);
}
}
TEST_CASE("kmeans.10.1C1G") {
kmeans(10, 2, 10, 1, 1);
}
TEST_CASE("kmeans.10.1C2G") {
kmeans(10, 2, 10, 1, 2);
}
TEST_CASE("kmeans.10.1C3G") {
kmeans(10, 2, 10, 1, 3);
}
TEST_CASE("kmeans.10.1C4G") {
kmeans(10, 2, 10, 1, 4);
}
TEST_CASE("kmeans.10.2C1G") {
kmeans(10, 2, 10, 2, 1);
}
TEST_CASE("kmeans.10.2C2G") {
kmeans(10, 2, 10, 2, 2);
}
TEST_CASE("kmeans.10.2C3G") {
kmeans(10, 2, 10, 2, 3);
}
TEST_CASE("kmeans.10.2C4G") {
kmeans(10, 2, 10, 2, 4);
}
TEST_CASE("kmeans.10.4C1G") {
kmeans(10, 2, 10, 4, 1);
}
TEST_CASE("kmeans.10.4C2G") {
kmeans(10, 2, 10, 4, 2);
}
TEST_CASE("kmeans.10.4C3G") {
kmeans(10, 2, 10, 4, 3);
}
TEST_CASE("kmeans.10.4C4G") {
kmeans(10, 2, 10, 4, 4);
}
TEST_CASE("kmeans.100.1C1G") {
kmeans(100, 4, 100, 1, 1);
}
TEST_CASE("kmeans.100.2C2G") {
kmeans(100, 4, 100, 2, 2);
}
TEST_CASE("kmeans.100.3C3G") {
kmeans(100, 4, 100, 3, 3);
}
TEST_CASE("kmeans.100.4C4G") {
kmeans(100, 4, 100, 4, 4);
}
TEST_CASE("kmeans.1000.1C1G") {
kmeans(1000, 8, 1000, 1, 1);
}
TEST_CASE("kmeans.1000.2C2G") {
kmeans(1000, 8, 1000, 2, 2);
}
TEST_CASE("kmeans.1000.4C4G") {
kmeans(1000, 8, 1000, 4, 4);
}
TEST_CASE("kmeans.1000.8C8G") {
kmeans(1000, 8, 1000, 8, 8);
}
TEST_CASE("kmeans.1000.16C16G") {
kmeans(1000, 8, 1000, 16, 16);
}
|
9ee1443d7bacf42cc9a510124de86d639bea7889.cu
|
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cudaflow.hpp>
#define L2(x1, y1, x2, y2) ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
// Each point (thread) computes its distance to each centroid
// and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(
const float* px,
const float* py,
int N,
const float* mx,
const float* my,
float* sx,
float* sy,
int k,
int* c
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= N) {
return;
}
// Make global loads once.
const float x = px[index];
const float y = py[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance = L2(x, y, mx[cluster], my[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
atomicAdd(&sx[best_cluster], x);
atomicAdd(&sy[best_cluster], y);
atomicAdd(&c [best_cluster], 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(
float* mx, float* my, const float* sx, const float* sy, const int* c
) {
const int cluster = threadIdx.x;
const int count = max(1, c[cluster]); // turn 0/0 to 0/1
mx[cluster] = sx[cluster] / count;
my[cluster] = sy[cluster] / count;
}
// k-means clustering
void kmeans(int N, int K, int M, size_t num_cpus, size_t num_gpus) {
std::vector<float> h_px, h_py, h_mx, h_my, mx, my;
std::vector<int> c(K), best_ks(N);
std::vector<float> sx(K), sy(K);
  float *d_px, *d_py, *d_mx, *d_my, *d_sx, *d_sy;
  int *d_c;
// Randomly generate N points
for(int i=0; i<N; ++i) {
h_px.push_back(rand()%1000 - 500);
h_py.push_back(rand()%1000 - 500);
if(i < K) {
mx.push_back(h_px.back());
my.push_back(h_py.back());
h_mx.push_back(h_px.back());
h_my.push_back(h_py.back());
}
}
tf::Taskflow taskflow;
tf::Executor executor(num_cpus + num_gpus);
// cpu version
auto init = taskflow.emplace([&](){
for(int i=0; i<K; ++i) {
mx[i] = h_px[i];
my[i] = h_py[i];
}
}).name("init");
// clear the storage
auto clean_up = taskflow.emplace([&](){
for(int k=0; k<K; ++k) {
sx[k] = 0.0f;
sy[k] = 0.0f;
c [k] = 0;
}
}).name("clean_up");
tf::Task pf;
// update cluster
pf = taskflow.for_each_index(0, N, 1, [&](int i){
float x = h_px[i];
float y = h_py[i];
float best_d = std::numeric_limits<float>::max();
int best_k = 0;
for (int k = 0; k < K; ++k) {
const float d = L2(x, y, mx[k], my[k]);
if (d < best_d) {
best_d = d;
best_k = k;
}
}
best_ks[i] = best_k;
});
auto update_cluster = taskflow.emplace([&](){
for(int i=0; i<N; i++) {
sx[best_ks[i]] += h_px[i];
sy[best_ks[i]] += h_py[i];
c [best_ks[i]] += 1;
}
for(int k=0; k<K; ++k) {
auto count = max(1, c[k]); // turn 0/0 to 0/1
mx[k] = sx[k] / count;
my[k] = sy[k] / count;
}
}).name("update_cluster");
auto condition = taskflow.emplace([m=0, M]() mutable {
return (m++ < M) ? 0 : 1;
}).name("converged?");
init.precede(clean_up);
clean_up.precede(pf);
pf.precede(update_cluster);
condition.precede(clean_up)
.succeed(update_cluster);
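  // Control flow note (assuming Taskflow condition-task semantics, where the returned index
  // selects which successor runs next): while m < M the condition returns 0 and re-runs
  // clean_up, so clean_up -> pf -> update_cluster iterates M times; returning 1 selects a
  // successor that does not exist, which ends the CPU pass.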
// gpu version
auto allocate_px = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_px, N*sizeof(float)) == cudaSuccess);
}).name("allocate_px");
auto allocate_py = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_py, N*sizeof(float)) == cudaSuccess);
}).name("allocate_py");
auto allocate_mx = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_mx, K*sizeof(float)) == cudaSuccess);
}).name("allocate_mx");
auto allocate_my = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_my, K*sizeof(float)) == cudaSuccess);
}).name("allocate_my");
auto allocate_sx = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_sx, K*sizeof(float)) == cudaSuccess);
}).name("allocate_sx");
auto allocate_sy = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_sy, K*sizeof(float)) == cudaSuccess);
}).name("allocate_sy");
auto allocate_c = taskflow.emplace([&](){
    REQUIRE(cudaMalloc(&d_c, K*sizeof(int)) == cudaSuccess);
}).name("allocate_c");
auto h2d = taskflow.emplace([&](tf::cudaFlow& cf){
cf.copy(d_px, h_px.data(), N).name("h2d_px");
cf.copy(d_py, h_py.data(), N).name("h2d_py");
cf.copy(d_mx, h_mx.data(), K).name("h2d_mx");
cf.copy(d_my, h_my.data(), K).name("h2d_my");
}).name("h2d");
auto kmeans = taskflow.emplace([&](tf::cudaFlow& cf){
auto zero_c = cf.zero(d_c, K).name("zero_c");
auto zero_sx = cf.zero(d_sx, K).name("zero_sx");
auto zero_sy = cf.zero(d_sy, K).name("zero_sy");
auto cluster = cf.kernel(
(N+1024-1) / 1024, 1024, 0,
assign_clusters, d_px, d_py, N, d_mx, d_my, d_sx, d_sy, K, d_c
).name("cluster");
auto new_centroid = cf.kernel(
1, K, 0,
compute_new_means, d_mx, d_my, d_sx, d_sy, d_c
).name("new_centroid");
cluster.precede(new_centroid)
.succeed(zero_c, zero_sx, zero_sy);
}).name("update_means");
auto gpu_condition = taskflow.emplace([i=0, M] () mutable {
return i++ < M ? 0 : 1;
}).name("converged?");
auto stop = taskflow.emplace([&](tf::cudaFlow& cf){
cf.copy(h_mx.data(), d_mx, K).name("d2h_mx");
cf.copy(h_my.data(), d_my, K).name("d2h_my");
}).name("stop");
auto free = taskflow.emplace([&](){
REQUIRE(cudaFree(d_px)==cudaSuccess);
REQUIRE(cudaFree(d_py)==cudaSuccess);
REQUIRE(cudaFree(d_mx)==cudaSuccess);
REQUIRE(cudaFree(d_my)==cudaSuccess);
REQUIRE(cudaFree(d_sx)==cudaSuccess);
REQUIRE(cudaFree(d_sy)==cudaSuccess);
REQUIRE(cudaFree(d_c )==cudaSuccess);
}).name("free");
// build up the dependency
h2d.succeed(allocate_px, allocate_py, allocate_mx, allocate_my);
kmeans.succeed(allocate_sx, allocate_sy, allocate_c, h2d)
.precede(gpu_condition);
gpu_condition.precede(kmeans, stop);
stop.precede(free);
executor.run(taskflow).wait();
//taskflow.dump(std::cout);
for(int k=0; k<K; k++) {
REQUIRE(std::fabs(h_mx[k] - mx[k]) < 1.0f);
REQUIRE(std::fabs(h_my[k] - my[k]) < 1.0f);
}
}
TEST_CASE("kmeans.10.1C1G") {
kmeans(10, 2, 10, 1, 1);
}
TEST_CASE("kmeans.10.1C2G") {
kmeans(10, 2, 10, 1, 2);
}
TEST_CASE("kmeans.10.1C3G") {
kmeans(10, 2, 10, 1, 3);
}
TEST_CASE("kmeans.10.1C4G") {
kmeans(10, 2, 10, 1, 4);
}
TEST_CASE("kmeans.10.2C1G") {
kmeans(10, 2, 10, 2, 1);
}
TEST_CASE("kmeans.10.2C2G") {
kmeans(10, 2, 10, 2, 2);
}
TEST_CASE("kmeans.10.2C3G") {
kmeans(10, 2, 10, 2, 3);
}
TEST_CASE("kmeans.10.2C4G") {
kmeans(10, 2, 10, 2, 4);
}
TEST_CASE("kmeans.10.4C1G") {
kmeans(10, 2, 10, 4, 1);
}
TEST_CASE("kmeans.10.4C2G") {
kmeans(10, 2, 10, 4, 2);
}
TEST_CASE("kmeans.10.4C3G") {
kmeans(10, 2, 10, 4, 3);
}
TEST_CASE("kmeans.10.4C4G") {
kmeans(10, 2, 10, 4, 4);
}
TEST_CASE("kmeans.100.1C1G") {
kmeans(100, 4, 100, 1, 1);
}
TEST_CASE("kmeans.100.2C2G") {
kmeans(100, 4, 100, 2, 2);
}
TEST_CASE("kmeans.100.3C3G") {
kmeans(100, 4, 100, 3, 3);
}
TEST_CASE("kmeans.100.4C4G") {
kmeans(100, 4, 100, 4, 4);
}
TEST_CASE("kmeans.1000.1C1G") {
kmeans(1000, 8, 1000, 1, 1);
}
TEST_CASE("kmeans.1000.2C2G") {
kmeans(1000, 8, 1000, 2, 2);
}
TEST_CASE("kmeans.1000.4C4G") {
kmeans(1000, 8, 1000, 4, 4);
}
TEST_CASE("kmeans.1000.8C8G") {
kmeans(1000, 8, 1000, 8, 8);
}
TEST_CASE("kmeans.1000.16C16G") {
kmeans(1000, 8, 1000, 16, 16);
}
|
06a8cb09266d1aededffed08df31c8c8114b65ff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/sgd.hpp"
namespace lbann {
namespace {
template <typename TensorDataType>
__global__ void momentum_noncontiguous_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ velocity,
size_t velocity_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < height * width) {
const auto& row = gid % height;
const auto& col = gid / height;
const auto& g = gradient[row + col * gradient_ldim];
auto& v = velocity[row + col * velocity_ldim];
auto& x = values[row + col * values_ldim];
v = momentum * v + g;
x -= learning_rate * v;
}
}
template <typename TensorDataType>
__global__ void momentum_contiguous_kernel(size_t size,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
const TensorDataType * __restrict__ gradient,
TensorDataType * __restrict__ velocity) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < size) {
const auto& g = gradient[gid];
auto& v = velocity[gid];
auto& x = values[gid];
v = momentum * v + g;
x -= learning_rate * v;
}
}
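// The kernel below applies Nesterov momentum in the "lookahead folded into the update" form:
//   v_{t+1} = momentum * v_t + g_t
//   x_{t+1} = x_t - learning_rate * (momentum * v_{t+1} + g_t)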
template <typename TensorDataType>
__global__ void nesterov_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ velocity,
size_t velocity_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = gridDim.x * blockDim.x;
for (size_t pos = gid; pos < height * width; pos += nthreads) {
const auto& row = pos % height;
const auto& col = pos / height;
const auto& g = gradient[row + col * gradient_ldim];
auto& v = velocity[row + col * velocity_ldim];
auto& x = values[row + col * values_ldim];
v = momentum * v + g;
x -= learning_rate * (momentum * v + g);
}
}
} // namespace
template <typename TensorDataType>
void sgd<TensorDataType>::momentum_step_gpu(AbsDistMatrixType& values,
const AbsDistMatrixType& gradient) {
// Get matrix dimensions
const size_t local_height = values.LocalHeight();
const size_t local_width = values.LocalWidth();
const size_t local_size = local_height * local_width;
if (local_size <= 0) { return; }
// Launch GPU kernels for momentum SGD or NAG
constexpr size_t block_size = 256;
const size_t grid_size = (local_size + block_size - 1) / block_size;
auto multisync = El::MakeMultiSync(gpu::get_sync_info(values),
gpu::get_sync_info(gradient));
if (m_nesterov) {
hydrogen::gpu::LaunchKernel(
nesterov_kernel<TensorDataType>,
grid_size, block_size, 0, multisync,
local_height, local_width,
this->get_learning_rate(), m_momentum,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_velocity->Buffer(), m_velocity->LDim());
} else {
if (values.Contiguous() && gradient.Contiguous()
&& m_velocity->Contiguous()) {
hydrogen::gpu::LaunchKernel(
momentum_contiguous_kernel<TensorDataType>,
grid_size, block_size, 0, multisync,
local_size, this->get_learning_rate(), m_momentum,
values.Buffer(), gradient.LockedBuffer(), m_velocity->Buffer());
} else {
hydrogen::gpu::LaunchKernel(
momentum_noncontiguous_kernel<TensorDataType>,
grid_size, block_size, 0, multisync,
local_height, local_width,
this->get_learning_rate(), m_momentum,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_velocity->Buffer(), m_velocity->LDim());
}
}
}
#ifdef LBANN_HAS_HALF
template <>
void sgd<cpu_fp16>::momentum_step_gpu(AbsDistMatrixType&,
const AbsDistMatrixType&) {
LBANN_ERROR("Can't call this function with cpu_fp16!");
}
#endif // LBANN_HAS_HALF
#define PROTO(T) \
template void sgd<T>::momentum_step_gpu( \
El::AbstractDistMatrix<T>&, \
const El::AbstractDistMatrix<T>&)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
06a8cb09266d1aededffed08df31c8c8114b65ff.cu
|
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/sgd.hpp"
namespace lbann {
namespace {
template <typename TensorDataType>
__global__ void momentum_noncontiguous_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ velocity,
size_t velocity_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < height * width) {
const auto& row = gid % height;
const auto& col = gid / height;
const auto& g = gradient[row + col * gradient_ldim];
auto& v = velocity[row + col * velocity_ldim];
auto& x = values[row + col * values_ldim];
v = momentum * v + g;
x -= learning_rate * v;
}
}
template <typename TensorDataType>
__global__ void momentum_contiguous_kernel(size_t size,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
const TensorDataType * __restrict__ gradient,
TensorDataType * __restrict__ velocity) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid < size) {
const auto& g = gradient[gid];
auto& v = velocity[gid];
auto& x = values[gid];
v = momentum * v + g;
x -= learning_rate * v;
}
}
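// The kernel below applies Nesterov momentum in the "lookahead folded into the update" form:
//   v_{t+1} = momentum * v_t + g_t
//   x_{t+1} = x_t - learning_rate * (momentum * v_{t+1} + g_t)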
template <typename TensorDataType>
__global__ void nesterov_kernel(size_t height,
size_t width,
TensorDataType learning_rate,
TensorDataType momentum,
TensorDataType * __restrict__ values,
size_t values_ldim,
const TensorDataType * __restrict__ gradient,
size_t gradient_ldim,
TensorDataType * __restrict__ velocity,
size_t velocity_ldim) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = gridDim.x * blockDim.x;
for (size_t pos = gid; pos < height * width; pos += nthreads) {
const auto& row = pos % height;
const auto& col = pos / height;
const auto& g = gradient[row + col * gradient_ldim];
auto& v = velocity[row + col * velocity_ldim];
auto& x = values[row + col * values_ldim];
v = momentum * v + g;
x -= learning_rate * (momentum * v + g);
}
}
} // namespace
template <typename TensorDataType>
void sgd<TensorDataType>::momentum_step_gpu(AbsDistMatrixType& values,
const AbsDistMatrixType& gradient) {
// Get matrix dimensions
const size_t local_height = values.LocalHeight();
const size_t local_width = values.LocalWidth();
const size_t local_size = local_height * local_width;
if (local_size <= 0) { return; }
// Launch GPU kernels for momentum SGD or NAG
constexpr size_t block_size = 256;
const size_t grid_size = (local_size + block_size - 1) / block_size;
auto multisync = El::MakeMultiSync(gpu::get_sync_info(values),
gpu::get_sync_info(gradient));
if (m_nesterov) {
hydrogen::gpu::LaunchKernel(
nesterov_kernel<TensorDataType>,
grid_size, block_size, 0, multisync,
local_height, local_width,
this->get_learning_rate(), m_momentum,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_velocity->Buffer(), m_velocity->LDim());
} else {
if (values.Contiguous() && gradient.Contiguous()
&& m_velocity->Contiguous()) {
hydrogen::gpu::LaunchKernel(
momentum_contiguous_kernel<TensorDataType>,
grid_size, block_size, 0, multisync,
local_size, this->get_learning_rate(), m_momentum,
values.Buffer(), gradient.LockedBuffer(), m_velocity->Buffer());
} else {
hydrogen::gpu::LaunchKernel(
momentum_noncontiguous_kernel<TensorDataType>,
grid_size, block_size, 0, multisync,
local_height, local_width,
this->get_learning_rate(), m_momentum,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_velocity->Buffer(), m_velocity->LDim());
}
}
}
#ifdef LBANN_HAS_HALF
template <>
void sgd<cpu_fp16>::momentum_step_gpu(AbsDistMatrixType&,
const AbsDistMatrixType&) {
LBANN_ERROR("Can't call this function with cpu_fp16!");
}
#endif // LBANN_HAS_HALF
#define PROTO(T) \
template void sgd<T>::momentum_step_gpu( \
El::AbstractDistMatrix<T>&, \
const El::AbstractDistMatrix<T>&)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
3fe38339a5df99157a9e7096e5b400c83b99e5fc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -----------------------------------------------------------------------------
*
* Module : Ion Series
* Copyright : (c) [2009..2011] Kevin Ying
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include <thrust/iterator/counting_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include "utils.h"
#include "device.h"
#include "texture.h"
#include "ion_series.h"
#include "algorithms.h"
#include <stdint.h>
#include <time.h>
#include <stdio.h>
//#define DEBUG
/*
* Scan a warp-sized chunk of data. Because warps execute instructions in SIMD
* fashion, there is no need to synchronise in order to share data. The most
* efficient algorithm is the step-efficient method of Hillis & Steele that
* takes log(N) steps, rather than the work-efficient tree-based algorithm
* described by Blelloch that takes 2 * log(N) steps.
*/
template <class T, bool inclusive>
static __device__ T
scan_warp(T val, volatile T* s_data)
{
const uint32_t idx = threadIdx.x;
const uint32_t lane = threadIdx.x & (WARP_SIZE-1);
/*
* If we double the size of the s_data array and pad the bottom half with
* zero, then we can avoid branching (although there is plenty already).
*
* In device emulation mode, the warp size is 1 and so sync-less operation
* does not work.
*/
s_data[idx] = val; __EMUSYNC;
#ifdef __DEVICE_EMULATION__
val = (lane >= 1) ? s_data[idx - 1] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 2) ? s_data[idx - 2] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 4) ? s_data[idx - 4] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 8) ? s_data[idx - 8] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 16) ? s_data[idx - 16] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
#else
if (lane >= 1) s_data[idx] = val = val + s_data[idx - 1];
if (lane >= 2) s_data[idx] = val = val + s_data[idx - 2];
if (lane >= 4) s_data[idx] = val = val + s_data[idx - 4];
if (lane >= 8) s_data[idx] = val = val + s_data[idx - 8];
if (lane >= 16) s_data[idx] = val = val + s_data[idx - 16];
#endif
if (inclusive) return s_data[idx];
else return (lane > 0) ? s_data[idx - 1] : 0;
}
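/*
 * Worked example (illustrative): with WARP_SIZE == 32 and every lane contributing val = 1,
 * the inclusive variant leaves s_data[idx] == lane + 1 and returns it, while the exclusive
 * variant returns lane (0 for lane 0).
 */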
__inline__ __device__ static float
ionMZ(const float m, const float c)
{
return __fdividef(m + MASS_H * c, c);
}
__inline__ __device__ static uint32_t
binMZ(const float mz)
{
return rintf(__fdividef(mz, BIN_WIDTH_MONO));
}
__inline__ __device__ static void
addIon(uint32_t *d_spec, const uint32_t N, const int32_t x, const uint32_t y)
{
if (0 <= x && x < N) atomicMax(&d_spec[x], y);
}
template <uint32_t charge>
__device__ void
addIonsAB(uint32_t *d_spec, const uint32_t N, const float mass)
{
float m;
int32_t x;
// A-ions
addIon(d_spec, N, binMZ(ionMZ(mass - MASS_CO, charge)), 10);
// B-ions
m = ionMZ(mass, charge);
x = binMZ(m);
addIon(d_spec, N, x, 50);
addIon(d_spec, N, x+1, 25); // technically, should be binMZ(m+1)
addIon(d_spec, N, x-1, 25);
addIon(d_spec, N, binMZ(m - __fdividef(MASS_H2O, charge)), 10);
addIon(d_spec, N, binMZ(m - __fdividef(MASS_NH3, charge)), 10);
}
template <uint32_t charge>
__device__ void
addIonsY(uint32_t *d_spec, const uint32_t N, const float mass)
{
float m = ionMZ(mass + MASS_H2O, charge);
int32_t x = binMZ(m);
// Y-ions
addIon(d_spec, N, x, 50);
addIon(d_spec, N, x+1, 25);
addIon(d_spec, N, x-1, 25);
addIon(d_spec, N, binMZ(m - __fdividef(MASS_NH3, charge)), 10);
}
template <uint32_t charge>
__device__ void
addIons_k(uint32_t *d_spec, const uint32_t N, const float b_mass, const float y_mass)
{
addIonsAB<charge>(d_spec, N, b_mass);
addIonsY <charge>(d_spec, N, y_mass);
}
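/*
 * Peak intensities encoded above: the principal b- and y-ion bins receive 50, their +/- 1
 * bin neighbours receive 25, and the a-ion and neutral-loss (H2O, NH3) bins receive 10;
 * addIon uses atomicMax, so the strongest annotation wins when bins collide.
 */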
/*
* Return the mass of an amino acid residue in atomic mass units, for the given
* short abbreviation.
*/
template <bool UseCache>
__device__ float
getAAMass(const float *d_mass, const char aa)
{
return fetch_x<UseCache>(aa - 'A', d_mass);
}
__device__ __inline__ bool
isToBeModded(const uint32_t *d_mpep_unrank, const uint32_t *d_mod_ma_count, uint32_t ma_idx, uint32_t ith_ma)
{
bool res = false;
uint32_t unrank_idx = 0;
for (uint32_t i = 0; i < ma_idx; ++i)
{
unrank_idx += d_mod_ma_count[i];
}
for (uint32_t i = 0; i < d_mod_ma_count[ma_idx]; ++i)
{
if (d_mpep_unrank[unrank_idx + i] == ith_ma)
res = true;
}
return res;
}
/*
* Generate theoretical spectra for a collection of peptide fragments. The
* 'ions' array contains the individual amino-acid masses for the database
* entries. We are interested in the sequences generated between the terminal
* indices (tc,tn) of the locations specified in the 'idx' array.
*
* A warp of threads iterates between the (tc,tn) indices, generating the b- and
* y-ion mass ladders. A (long) sequence of (slow) global atomic update requests
* is subsequently issued. The input d_spec should be initially zero, and on
* output will contain the theoretical spectra peaks in a dense (although
* mostly zero) matrix.
*/
template <uint32_t BlockSize, uint32_t MaxCharge, bool UseCache, uint32_t NumMA>
__global__ static void
addModIons_core
(
float *d_mspec,
uint8_t *d_mions, // For debugging, records ions in char form
const float *d_residual, // peptide residual mass
const float *d_mass, // lookup table for ion character codes ['A'..'Z']
const uint8_t *d_ions, // individual ion character codes (the database)
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_mpep_pep_idx,
const uint32_t *d_mpep_pep_mod_idx,
const uint32_t *d_mpep_unrank,
const uint32_t *d_mpep_mod_ma_count_sum_scan,
const uint32_t num_mpep,
const uint32_t *_d_mod_ma_count,
const float *d_mod_delta,
const uint8_t *d_ma,
const float *d_ma_mass,
const uint32_t len_spec
)
{
assert(BlockSize % WARP_SIZE == 0);
const uint32_t vectorsPerBlock = BlockSize / WARP_SIZE;
const uint32_t numVectors = vectorsPerBlock * gridDim.x;
const uint32_t numThreads = BlockSize * gridDim.x;
const uint32_t thread_id = BlockSize * blockIdx.x + threadIdx.x;
const uint32_t vector_id = thread_id / WARP_SIZE;
const uint32_t thread_lane = threadIdx.x & (WARP_SIZE-1);
__shared__ volatile float s_data[BlockSize];
// Keep a record of ith moddable acid as the pep is traversed
__shared__ volatile uint32_t s_pep_ith_ma[NumMA][BlockSize];
for (uint32_t row = vector_id; row < num_mpep; row += numVectors)
{
const uint32_t pep_idx = d_mpep_pep_idx[row];
const uint32_t mod_idx = d_mpep_pep_mod_idx[row];
const uint32_t unrank_pos = d_mpep_mod_ma_count_sum_scan[row];
const uint32_t *d_mod_ma_count = _d_mod_ma_count + mod_idx*NumMA;
const uint32_t row_start = d_tc[pep_idx];
const uint32_t row_end = d_tn[pep_idx];
const float residual = d_residual[pep_idx] + d_mod_delta[mod_idx];
uint32_t *spec = (uint32_t*) &d_mspec[row * len_spec];
float b_mass;
float y_mass;
s_data[threadIdx.x] = 0;
//s_test[threadIdx.x] = 0;
for (int mod = 0; mod < NumMA; mod++)
{
s_pep_ith_ma[mod][threadIdx.x] = 0;
}
/*
* Have all threads read in values for this segment, writing the
* spectral peaks out to global memory (very, very slowly...)
*/
for (uint32_t j = row_start + thread_lane; j < row_end; j += WARP_SIZE)
{
bool is_ma = false;
uint32_t ma_idx = 0;
/*
* Load the ion mass, and propagate the partial scan results
*/
            // is this a moddable acid
for (int m = 0; m < NumMA; m++)
{
uint32_t count = 0;
uint8_t mod = GET_ACID_MOD(d_ions[j]);
if (mod != 0) {
is_ma = true;
count = 1;
ma_idx = mod- 1;
}
if (thread_lane == 0) {
count += s_pep_ith_ma[m][threadIdx.x + (WARP_SIZE-1)];
//count += s_test[threadIdx.x + (WARP_SIZE-1)];
}
scan_warp<uint32_t, true>(count, s_pep_ith_ma[m]);
}
uint32_t ith_ma = s_pep_ith_ma[ma_idx][threadIdx.x] - 1;
//if (is_ma)
bool modded = isToBeModded(d_mpep_unrank + unrank_pos, d_mod_ma_count, ma_idx, ith_ma);
//bool modded = isToBeModded(d_mpep_unrank, d_mod_ma_count, ma_idx, ith_ma);
if (is_ma && modded)
{
b_mass = getAAMass<UseCache>(d_mass, GET_ACID_CHAR(d_ions[j])) + d_ma_mass[ma_idx];
#ifdef DEBUG
d_mions[row*MAX_PEP_LEN + j - row_start] = GET_ACID_CHAR(d_ions[j]) + 32;
#endif
} else {
b_mass = getAAMass<UseCache>(d_mass, GET_ACID_CHAR(d_ions[j]));
#ifdef DEBUG
d_mions[row*MAX_PEP_LEN + j - row_start] = GET_ACID_CHAR(d_ions[j]);
#endif
}
//#ifdef DEBUG
//d_mions[row*MAX_PEP_LEN + j - row_start] = (uint8_t)(((int)'0')+(ith_ma));
//#endif
if (thread_lane == 0)
{
b_mass += s_data[threadIdx.x + (WARP_SIZE-1)];
}
/*
* Generate fragment mass ladder
*/
b_mass = scan_warp<float,true>(b_mass, s_data);
y_mass = residual - b_mass;
if (1 <= MaxCharge) addIons_k<1>(spec, len_spec, b_mass, y_mass);
if (2 <= MaxCharge) addIons_k<2>(spec, len_spec, b_mass, y_mass);
if (3 <= MaxCharge) addIons_k<3>(spec, len_spec, b_mass, y_mass);
if (4 <= MaxCharge) addIons_k<4>(spec, len_spec, b_mass, y_mass);
}
}
__syncthreads();
// Now convert everything in spectrum as float
float* d_spec = d_mspec;
size_t pos;
size_t len_d_spec = num_mpep*len_spec;
for (int i = 0; i < len_d_spec / numThreads; ++i)
{
pos = i*numThreads + thread_id;
d_spec[pos] = __int2float_rn(__float_as_int(d_spec[pos]));
}
size_t remainder = len_d_spec % numThreads;
if (thread_id < remainder)
{
pos = len_d_spec - remainder + thread_id;
d_spec[pos] = __int2float_rn(__float_as_int(d_spec[pos]));
}
}
/*
* Select a number of threads and blocks. Each block will have at least one full
* warp, as required by the core kernel
*/
static void
addModIons_control(uint32_t N, uint32_t &blocks, uint32_t &threads)
{
threads = (N < MAX_THREADS) ? max(WARP_SIZE, ceilPow2(N)) : MAX_THREADS;
blocks = (N + threads - 1) / threads;
blocks = min(blocks, MAX_BLOCKS);
}
template <uint32_t MaxCharge, bool UseCache, uint32_t NumMA>
static void
addModIons_dispatch
(
float *d_mspec,
uint8_t *d_mions, // For debugging, records ions in char form
const float *d_residual, // peptide residual mass
const float *d_mass, // lookup table for ion character codes ['A'..'Z']
const uint8_t *d_ions, // individual ion character codes (the database)
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_mpep_pep_idx,
const uint32_t *d_mpep_pep_mod_idx,
const uint32_t *d_mpep_unrank,
const uint32_t *d_mpep_mod_ma_count_sum_scan,
const uint32_t num_mpep,
const uint32_t *_d_mod_ma_count,
const float *_d_mod_delta,
const uint8_t *d_ma,
const float *d_ma_mass,
const uint32_t len_spec,
hipStream_t stream
)
{
uint32_t blocks;
uint32_t threads;
if (UseCache)
bind_x(d_mass);
size_t ns = sizeof(float) + NumMA*sizeof(uint32_t);
addModIons_control(num_mpep, blocks, threads);
switch (threads)
{
//case 512:
//case 256:
case 128:hipLaunchKernelGGL(( addModIons_core<128,MaxCharge,UseCache,NumMA>), dim3(blocks),dim3(threads), 128*ns, stream, d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec); break;
case 64:hipLaunchKernelGGL(( addModIons_core< 64,MaxCharge,UseCache,NumMA>), dim3(blocks),dim3(threads), 64*ns, stream, d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec); break;
case 32:hipLaunchKernelGGL(( addModIons_core< 32,MaxCharge,UseCache,NumMA>), dim3(blocks),dim3(threads), 32*ns, stream, d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec); break;
default:
assert(!"Non-exhaustive patterns in match");
}
if (UseCache)
unbind_x(d_mass);
}
template <uint32_t NumMA>
static void
addModIons_dispatch_max_charge
(
float *d_mspec,
uint8_t *d_mions, // For debugging, records ions in char form
const float *d_residual, // peptide residual mass
const float *d_mass, // lookup table for ion character codes ['A'..'Z']
const uint8_t *d_ions, // individual ion character codes (the database)
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_mpep_pep_idx,
const uint32_t *d_mpep_pep_mod_idx,
const uint32_t *d_mpep_unrank,
const uint32_t *d_mpep_mod_ma_count_sum_scan,
const uint32_t num_mpep,
const uint32_t *_d_mod_ma_count,
const float *_d_mod_delta,
const uint8_t *d_ma,
const float *d_ma_mass,
const uint32_t len_spec,
const uint32_t max_charge,
hipStream_t stream
)
{
switch (max_charge)
{
case 1: addModIons_dispatch<1,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 2: addModIons_dispatch<2,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 3: addModIons_dispatch<3,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 4: addModIons_dispatch<4,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 5: addModIons_dispatch<5,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 6: addModIons_dispatch<6,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 7: addModIons_dispatch<7,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 8: addModIons_dispatch<8,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 9: addModIons_dispatch<9,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 10: addModIons_dispatch<10,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
default:
assert(!"Non-exhaustive patterns in match");
}
}
void addModIons
(
float *d_out_mspec,
const float *d_residual, // peptide residual mass
const float *d_mass, // lookup table for ion character codes ['A'..'Z']
const uint8_t *d_ions, // individual ion character codes (the database)
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_mpep_pep_idx,
const uint32_t *d_mpep_pep_mod_idx,
const uint32_t *d_mpep_unrank,
const uint32_t *d_mpep_mod_ma_count_sum_scan,
const uint32_t len_unrank,
const uint32_t num_mpep,
const uint32_t *d_mod_ma_count,
const float *d_mod_delta,
const uint8_t *d_ma,
const float *d_ma_mass,
const uint32_t num_ma,
const uint32_t len_spec,
const uint32_t max_charge,
hipStream_t stream
)
{
#ifdef _BENCH
time_t t_beg, t_end;
time(&t_beg);
std::cout << "mion_series" << std::endl;
printGPUMemoryUsage();
#endif
#ifdef DEBUG
thrust::device_vector<uint8_t> d_mions_v(MAX_PEP_LEN*num_mpep);
thrust::device_ptr<uint8_t> d_mions(d_mions_v.data());
#else
thrust::device_ptr<uint8_t> d_mions;
#endif
switch (num_ma)
{
case 1: addModIons_dispatch_max_charge<1>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 2: addModIons_dispatch_max_charge<2>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 3: addModIons_dispatch_max_charge<3>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 4: addModIons_dispatch_max_charge<4>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 5: addModIons_dispatch_max_charge<5>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 6: addModIons_dispatch_max_charge<6>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 7: addModIons_dispatch_max_charge<7>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 8: addModIons_dispatch_max_charge<8>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 9: addModIons_dispatch_max_charge<9>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 10: addModIons_dispatch_max_charge<10>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
default:
assert(!"Non-exhaustive patterns in match");
}
#ifdef DEBUG
std::cout << "Checking generated spectrums" << std::endl;
// To check the spectra above, create the modified peptides serially and call addIons to create spectra, then compare the two sets of spectra.
// NB: currently only one alternative mass per acid is allowed; lowercase is used for the modified mass.
printGPUMemoryUsage();
std::cout << "to alloc " << len_spec*num_mpep*sizeof(float) << std::endl;
thrust::device_vector<float> d_out_check_spec(len_spec*num_mpep);
std::cout << "475" << std::endl;
getSpecNonParallel(d_out_check_spec.data().get(),
d_mions.get(),
d_residual, d_mass, d_ions, d_tc, d_tn,
d_mpep_pep_idx, d_mpep_pep_mod_idx,
d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, len_unrank, num_mpep,
d_mod_ma_count, d_mod_delta,
d_ma, d_ma_mass, num_ma,
max_charge, len_spec);
std::cout << "485" << std::endl;
// compare
thrust::device_ptr<float> d_out_mspec_th(d_out_mspec);
if (!thrust::equal(d_out_check_spec.begin(), d_out_check_spec.end(), d_out_mspec_th)) {
std::cerr << "Spectrums doesn't seem to be correct" << std::endl;
uint32_t cnt = 0;
for (uint32_t i = 0; i < num_mpep; ++i) {
for (uint32_t j = 0; j < len_spec; ++j) {
uint32_t pos = i*len_spec + j;
if (d_out_check_spec[pos] != d_out_mspec_th[pos]) {
std::cout << "check " << d_out_check_spec[pos] << " != " << d_out_mspec_th[pos] << std::endl;
++cnt;
break;
} else {
}
}
}
std::cout << "num specs not right: " << cnt << " out of " << num_mpep << std::endl;
exit(1);
} else {
std::cout << "spectrum seems ok" << std:: endl;
}
#endif
#ifdef _BENCH
time(&t_end);
printf ("Time elapsed for addModIons: %.2lf seconds\n", difftime(t_end,t_beg));
printGPUMemoryUsage();
#endif
}
#undef DEBUG
|
3fe38339a5df99157a9e7096e5b400c83b99e5fc.cu
|
/* -----------------------------------------------------------------------------
*
* Module : Ion Series
* Copyright : (c) [2009..2011] Kevin Ying
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include <thrust/iterator/counting_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include "utils.h"
#include "device.h"
#include "texture.h"
#include "ion_series.h"
#include "algorithms.h"
#include <stdint.h>
#include <time.h>
#include <stdio.h>
//#define DEBUG
/*
* Scan a warp-sized chunk of data. Because warps execute instructions in SIMD
* fashion, there is no need to synchronise in order to share data. The most
* efficient algorithm is the step-efficient method of Hillis & Steele that
* takes log(N) steps, rather than the work-efficient tree-based algorithm
* described by Blelloch that takes 2 * log(N) steps.
*/
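/*
 * Illustrative trace: an inclusive scan of the warp values [3, 1, 4, 1, ...]
 * proceeds with offsets 1, 2, 4, 8, 16, so after at most log2(32) = 5 steps
 * lane 3 holds 3 + 1 + 4 + 1 = 9. The exclusive result of a lane is simply
 * the inclusive sum of its left neighbour (or 0 for lane 0).
 */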
template <class T, bool inclusive>
static __device__ T
scan_warp(T val, volatile T* s_data)
{
const uint32_t idx = threadIdx.x;
const uint32_t lane = threadIdx.x & (WARP_SIZE-1);
/*
* If we double the size of the s_data array and pad the bottom half with
* zero, then we can avoid branching (although there is plenty already).
*
* In device emulation mode, the warp size is 1 and so sync-less operation
* does not work.
*/
s_data[idx] = val; __EMUSYNC;
#ifdef __DEVICE_EMULATION__
val = (lane >= 1) ? s_data[idx - 1] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 2) ? s_data[idx - 2] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 4) ? s_data[idx - 4] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 8) ? s_data[idx - 8] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
val = (lane >= 16) ? s_data[idx - 16] : 0; __EMUSYNC; s_data[idx] += val; __EMUSYNC;
#else
if (lane >= 1) s_data[idx] = val = val + s_data[idx - 1];
if (lane >= 2) s_data[idx] = val = val + s_data[idx - 2];
if (lane >= 4) s_data[idx] = val = val + s_data[idx - 4];
if (lane >= 8) s_data[idx] = val = val + s_data[idx - 8];
if (lane >= 16) s_data[idx] = val = val + s_data[idx - 16];
#endif
if (inclusive) return s_data[idx];
else return (lane > 0) ? s_data[idx - 1] : 0;
}
__inline__ __device__ static float
ionMZ(const float m, const float c)
{
return __fdividef(m + MASS_H * c, c);
}
__inline__ __device__ static uint32_t
binMZ(const float mz)
{
return rintf(__fdividef(mz, BIN_WIDTH_MONO));
}
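/*
 * Example (assuming BIN_WIDTH_MONO is close to 1 Da): an m/z of 500.25 maps
 * to bin rintf(500.25 / BIN_WIDTH_MONO) ~= 500, i.e. binMZ histograms peaks
 * at roughly unit-mass resolution.
 */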
__inline__ __device__ static void
addIon(uint32_t *d_spec, const uint32_t N, const int32_t x, const uint32_t y)
{
if (0 <= x && x < N) atomicMax(&d_spec[x], y);
}
template <uint32_t charge>
__device__ void
addIonsAB(uint32_t *d_spec, const uint32_t N, const float mass)
{
float m;
int32_t x;
// A-ions
addIon(d_spec, N, binMZ(ionMZ(mass - MASS_CO, charge)), 10);
// B-ions
m = ionMZ(mass, charge);
x = binMZ(m);
addIon(d_spec, N, x, 50);
addIon(d_spec, N, x+1, 25); // technically, should be binMZ(m+1)
addIon(d_spec, N, x-1, 25);
addIon(d_spec, N, binMZ(m - __fdividef(MASS_H2O, charge)), 10);
addIon(d_spec, N, binMZ(m - __fdividef(MASS_NH3, charge)), 10);
}
template <uint32_t charge>
__device__ void
addIonsY(uint32_t *d_spec, const uint32_t N, const float mass)
{
float m = ionMZ(mass + MASS_H2O, charge);
int32_t x = binMZ(m);
// Y-ions
addIon(d_spec, N, x, 50);
addIon(d_spec, N, x+1, 25);
addIon(d_spec, N, x-1, 25);
addIon(d_spec, N, binMZ(m - __fdividef(MASS_NH3, charge)), 10);
}
template <uint32_t charge>
__device__ void
addIons_k(uint32_t *d_spec, const uint32_t N, const float b_mass, const float y_mass)
{
addIonsAB<charge>(d_spec, N, b_mass);
addIonsY <charge>(d_spec, N, y_mass);
}
/*
* Return the mass of an amino acid residue in atomic mass units, for the given
* short abbreviation.
*/
template <bool UseCache>
__device__ float
getAAMass(const float *d_mass, const char aa)
{
return fetch_x<UseCache>(aa - 'A', d_mass);
}
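/*
 * isToBeModded: returns true when the ith occurrence (ith_ma) of modifiable
 * acid ma_idx in this peptide is one of the positions selected by the
 * unranked modification pattern d_mpep_unrank; the per-acid slices of
 * d_mpep_unrank are delimited by d_mod_ma_count.
 */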
__device__ __inline__ bool
isToBeModded(const uint32_t *d_mpep_unrank, const uint32_t *d_mod_ma_count, uint32_t ma_idx, uint32_t ith_ma)
{
bool res = false;
uint32_t unrank_idx = 0;
for (uint32_t i = 0; i < ma_idx; ++i)
{
unrank_idx += d_mod_ma_count[i];
}
for (uint32_t i = 0; i < d_mod_ma_count[ma_idx]; ++i)
{
if (d_mpep_unrank[unrank_idx + i] == ith_ma)
res = true;
}
return res;
}
/*
* Generate theoretical spectra for a collection of peptide fragments. The
* 'ions' array contains the individual amino-acid masses for the database
* entries. We are interested in the sequences generated between the terminal
* indices (tc,tn) of the locations specified in the 'idx' array.
*
* A warp of threads iterates between the (tc,tn) indices, generating the b- and
* y-ion mass ladders. A (long) sequence of (slow) global atomic update requests
* is subsequently issued. The input d_spec should be initially zero, and on
* output will contain the theoretical spectra peaks in a dense (although
* mostly zero) matrix.
*/
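/*
 * Note: peak intensities are accumulated as small integers through atomicMax
 * on the (aliased) uint32_t view of each row of d_mspec; a final pass at the
 * end of the kernel reinterprets those bit patterns and converts them to
 * float in place.
 */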
template <uint32_t BlockSize, uint32_t MaxCharge, bool UseCache, uint32_t NumMA>
__global__ static void
addModIons_core
(
float *d_mspec,
uint8_t *d_mions, // For debugging, records ions in char form
const float *d_residual, // peptide residual mass
const float *d_mass, // lookup table for ion character codes ['A'..'Z']
const uint8_t *d_ions, // individual ion character codes (the database)
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_mpep_pep_idx,
const uint32_t *d_mpep_pep_mod_idx,
const uint32_t *d_mpep_unrank,
const uint32_t *d_mpep_mod_ma_count_sum_scan,
const uint32_t num_mpep,
const uint32_t *_d_mod_ma_count,
const float *d_mod_delta,
const uint8_t *d_ma,
const float *d_ma_mass,
const uint32_t len_spec
)
{
assert(BlockSize % WARP_SIZE == 0);
const uint32_t vectorsPerBlock = BlockSize / WARP_SIZE;
const uint32_t numVectors = vectorsPerBlock * gridDim.x;
const uint32_t numThreads = BlockSize * gridDim.x;
const uint32_t thread_id = BlockSize * blockIdx.x + threadIdx.x;
const uint32_t vector_id = thread_id / WARP_SIZE;
const uint32_t thread_lane = threadIdx.x & (WARP_SIZE-1);
__shared__ volatile float s_data[BlockSize];
// Keep a record of ith moddable acid as the pep is traversed
__shared__ volatile uint32_t s_pep_ith_ma[NumMA][BlockSize];
for (uint32_t row = vector_id; row < num_mpep; row += numVectors)
{
const uint32_t pep_idx = d_mpep_pep_idx[row];
const uint32_t mod_idx = d_mpep_pep_mod_idx[row];
const uint32_t unrank_pos = d_mpep_mod_ma_count_sum_scan[row];
const uint32_t *d_mod_ma_count = _d_mod_ma_count + mod_idx*NumMA;
const uint32_t row_start = d_tc[pep_idx];
const uint32_t row_end = d_tn[pep_idx];
const float residual = d_residual[pep_idx] + d_mod_delta[mod_idx];
uint32_t *spec = (uint32_t*) &d_mspec[row * len_spec];
float b_mass;
float y_mass;
s_data[threadIdx.x] = 0;
//s_test[threadIdx.x] = 0;
for (int mod = 0; mod < NumMA; mod++)
{
s_pep_ith_ma[mod][threadIdx.x] = 0;
}
/*
* Have all threads read in values for this segment, writing the
* spectral peaks out to global memory (very, very slowly...)
*/
for (uint32_t j = row_start + thread_lane; j < row_end; j += WARP_SIZE)
{
bool is_ma = false;
uint32_t ma_idx = 0;
/*
* Load the ion mass, and propagate the partial scan results
*/
// is this a modable acid
for (int m = 0; m < NumMA; m++)
{
uint32_t count = 0;
uint8_t mod = GET_ACID_MOD(d_ions[j]);
if (mod != 0) {
is_ma = true;
count = 1;
ma_idx = mod- 1;
}
if (thread_lane == 0) {
count += s_pep_ith_ma[m][threadIdx.x + (WARP_SIZE-1)];
//count += s_test[threadIdx.x + (WARP_SIZE-1)];
}
scan_warp<uint32_t, true>(count, s_pep_ith_ma[m]);
}
uint32_t ith_ma = s_pep_ith_ma[ma_idx][threadIdx.x] - 1;
//if (is_ma)
bool modded = isToBeModded(d_mpep_unrank + unrank_pos, d_mod_ma_count, ma_idx, ith_ma);
//bool modded = isToBeModded(d_mpep_unrank, d_mod_ma_count, ma_idx, ith_ma);
if (is_ma && modded)
{
b_mass = getAAMass<UseCache>(d_mass, GET_ACID_CHAR(d_ions[j])) + d_ma_mass[ma_idx];
#ifdef DEBUG
d_mions[row*MAX_PEP_LEN + j - row_start] = GET_ACID_CHAR(d_ions[j]) + 32;
#endif
} else {
b_mass = getAAMass<UseCache>(d_mass, GET_ACID_CHAR(d_ions[j]));
#ifdef DEBUG
d_mions[row*MAX_PEP_LEN + j - row_start] = GET_ACID_CHAR(d_ions[j]);
#endif
}
//#ifdef DEBUG
//d_mions[row*MAX_PEP_LEN + j - row_start] = (uint8_t)(((int)'0')+(ith_ma));
//#endif
if (thread_lane == 0)
{
b_mass += s_data[threadIdx.x + (WARP_SIZE-1)];
}
/*
* Generate fragment mass ladder
*/
b_mass = scan_warp<float,true>(b_mass, s_data);
y_mass = residual - b_mass;
if (1 <= MaxCharge) addIons_k<1>(spec, len_spec, b_mass, y_mass);
if (2 <= MaxCharge) addIons_k<2>(spec, len_spec, b_mass, y_mass);
if (3 <= MaxCharge) addIons_k<3>(spec, len_spec, b_mass, y_mass);
if (4 <= MaxCharge) addIons_k<4>(spec, len_spec, b_mass, y_mass);
}
}
__syncthreads();
// Now convert everything in spectrum as float
float* d_spec = d_mspec;
size_t pos;
size_t len_d_spec = num_mpep*len_spec;
for (int i = 0; i < len_d_spec / numThreads; ++i)
{
pos = i*numThreads + thread_id;
d_spec[pos] = __int2float_rn(__float_as_int(d_spec[pos]));
}
size_t remainder = len_d_spec % numThreads;
if (thread_id < remainder)
{
pos = len_d_spec - remainder + thread_id;
d_spec[pos] = __int2float_rn(__float_as_int(d_spec[pos]));
}
}
/*
* Select a number of threads and blocks. Each block will have at least one full
* warp, as required by the core kernel
*/
static void
addModIons_control(uint32_t N, uint32_t &blocks, uint32_t &threads)
{
threads = (N < MAX_THREADS) ? max(WARP_SIZE, ceilPow2(N)) : MAX_THREADS;
blocks = (N + threads - 1) / threads;
blocks = min(blocks, MAX_BLOCKS);
}
template <uint32_t MaxCharge, bool UseCache, uint32_t NumMA>
static void
addModIons_dispatch
(
float *d_mspec,
uint8_t *d_mions, // For debugging, records ions in char form
const float *d_residual, // peptide residual mass
const float *d_mass, // lookup table for ion character codes ['A'..'Z']
const uint8_t *d_ions, // individual ion character codes (the database)
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_mpep_pep_idx,
const uint32_t *d_mpep_pep_mod_idx,
const uint32_t *d_mpep_unrank,
const uint32_t *d_mpep_mod_ma_count_sum_scan,
const uint32_t num_mpep,
const uint32_t *_d_mod_ma_count,
const float *_d_mod_delta,
const uint8_t *d_ma,
const float *d_ma_mass,
const uint32_t len_spec,
cudaStream_t stream
)
{
uint32_t blocks;
uint32_t threads;
if (UseCache)
bind_x(d_mass);
size_t ns = sizeof(float) + NumMA*sizeof(uint32_t);
addModIons_control(num_mpep, blocks, threads);
switch (threads)
{
//case 512:
//case 256:
case 128: addModIons_core<128,MaxCharge,UseCache,NumMA><<<blocks,threads, 128*ns, stream>>>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec); break;
case 64: addModIons_core< 64,MaxCharge,UseCache,NumMA><<<blocks,threads, 64*ns, stream>>>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec); break;
case 32: addModIons_core< 32,MaxCharge,UseCache,NumMA><<<blocks,threads, 32*ns, stream>>>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec); break;
default:
assert(!"Non-exhaustive patterns in match");
}
if (UseCache)
unbind_x(d_mass);
}
template <uint32_t NumMA>
static void
addModIons_dispatch_max_charge
(
float *d_mspec,
uint8_t *d_mions, // For debugging, records ions in char form
const float *d_residual, // peptide residual mass
const float *d_mass, // lookup table for ion character codes ['A'..'Z']
const uint8_t *d_ions, // individual ion character codes (the database)
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_mpep_pep_idx,
const uint32_t *d_mpep_pep_mod_idx,
const uint32_t *d_mpep_unrank,
const uint32_t *d_mpep_mod_ma_count_sum_scan,
const uint32_t num_mpep,
const uint32_t *_d_mod_ma_count,
const float *_d_mod_delta,
const uint8_t *d_ma,
const float *d_ma_mass,
const uint32_t len_spec,
const uint32_t max_charge,
cudaStream_t stream
)
{
switch (max_charge)
{
case 1: addModIons_dispatch<1,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 2: addModIons_dispatch<2,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 3: addModIons_dispatch<3,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 4: addModIons_dispatch<4,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 5: addModIons_dispatch<5,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 6: addModIons_dispatch<6,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 7: addModIons_dispatch<7,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 8: addModIons_dispatch<8,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 9: addModIons_dispatch<9,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
case 10: addModIons_dispatch<10,true,NumMA>(d_mspec, d_mions, d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, _d_mod_ma_count, _d_mod_delta, d_ma, d_ma_mass, len_spec, stream); break;
default:
assert(!"Non-exhaustive patterns in match");
}
}
void addModIons
(
float *d_out_mspec,
const float *d_residual, // peptide residual mass
const float *d_mass, // lookup table for ion character codes ['A'..'Z']
const uint8_t *d_ions, // individual ion character codes (the database)
const uint32_t *d_tc, // c-terminal indices
const uint32_t *d_tn, // n-terminal indices
const uint32_t *d_mpep_pep_idx,
const uint32_t *d_mpep_pep_mod_idx,
const uint32_t *d_mpep_unrank,
const uint32_t *d_mpep_mod_ma_count_sum_scan,
const uint32_t len_unrank,
const uint32_t num_mpep,
const uint32_t *d_mod_ma_count,
const float *d_mod_delta,
const uint8_t *d_ma,
const float *d_ma_mass,
const uint32_t num_ma,
const uint32_t len_spec,
const uint32_t max_charge,
cudaStream_t stream
)
{
#ifdef _BENCH
time_t t_beg, t_end;
time(&t_beg);
std::cout << "mion_series" << std::endl;
printGPUMemoryUsage();
#endif
#ifdef DEBUG
thrust::device_vector<uint8_t> d_mions_v(MAX_PEP_LEN*num_mpep);
thrust::device_ptr<uint8_t> d_mions(d_mions_v.data());
#else
thrust::device_ptr<uint8_t> d_mions;
#endif
switch (num_ma)
{
case 1: addModIons_dispatch_max_charge<1>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 2: addModIons_dispatch_max_charge<2>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 3: addModIons_dispatch_max_charge<3>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 4: addModIons_dispatch_max_charge<4>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 5: addModIons_dispatch_max_charge<5>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 6: addModIons_dispatch_max_charge<6>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 7: addModIons_dispatch_max_charge<7>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 8: addModIons_dispatch_max_charge<8>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 9: addModIons_dispatch_max_charge<9>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
case 10: addModIons_dispatch_max_charge<10>(d_out_mspec, d_mions.get(), d_residual, d_mass, d_ions, d_tc, d_tn, d_mpep_pep_idx, d_mpep_pep_mod_idx, d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, num_mpep, d_mod_ma_count, d_mod_delta, d_ma, d_ma_mass, len_spec, max_charge, stream); break;
default:
assert(!"Non-exhaustive patterns in match");
}
#ifdef DEBUG
std::cout << "Checking generated spectrums" << std::endl;
// To check the spectra above, create the modified peptides serially and call addIons to create spectra, then compare the two sets of spectra.
// NB: currently only one alternative mass per acid is allowed; lowercase is used for the modified mass.
printGPUMemoryUsage();
std::cout << "to alloc " << len_spec*num_mpep*sizeof(float) << std::endl;
thrust::device_vector<float> d_out_check_spec(len_spec*num_mpep);
std::cout << "475" << std::endl;
getSpecNonParallel(d_out_check_spec.data().get(),
d_mions.get(),
d_residual, d_mass, d_ions, d_tc, d_tn,
d_mpep_pep_idx, d_mpep_pep_mod_idx,
d_mpep_unrank, d_mpep_mod_ma_count_sum_scan, len_unrank, num_mpep,
d_mod_ma_count, d_mod_delta,
d_ma, d_ma_mass, num_ma,
max_charge, len_spec);
std::cout << "485" << std::endl;
// compare
thrust::device_ptr<float> d_out_mspec_th(d_out_mspec);
if (!thrust::equal(d_out_check_spec.begin(), d_out_check_spec.end(), d_out_mspec_th)) {
std::cerr << "Spectrums doesn't seem to be correct" << std::endl;
uint32_t cnt = 0;
for (uint32_t i = 0; i < num_mpep; ++i) {
for (uint32_t j = 0; j < len_spec; ++j) {
uint32_t pos = i*len_spec + j;
if (d_out_check_spec[pos] != d_out_mspec_th[pos]) {
std::cout << "check " << d_out_check_spec[pos] << " != " << d_out_mspec_th[pos] << std::endl;
++cnt;
break;
} else {
}
}
}
std::cout << "num specs not right: " << cnt << " out of " << num_mpep << std::endl;
exit(1);
} else {
std::cout << "spectrum seems ok" << std:: endl;
}
#endif
#ifdef _BENCH
time(&t_end);
printf ("Time elapsed for addModIons: %.2lf seconds\n", difftime(t_end,t_beg));
printGPUMemoryUsage();
#endif
}
#undef DEBUG
|
31f3110324a7e703dde3201774e45cec7137f138.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "stdlib.h"
__global__ void threadadd(float *a, float *b, float *out, int n) {
int tid = threadIdx.x; // this thread handles the data at its thread id
if (tid < n)
out[tid] = a[tid] + b[tid];
}
extern "C" int threadmain(float *a, float *b, float *out, int n) {
float *dev_a, *dev_b, *dev_out;
// allocate the memory on the GPU
hipMalloc((void**)&dev_a, n*sizeof(float));
hipMalloc((void**)&dev_b, n*sizeof(float));
hipMalloc((void**)&dev_out, n*sizeof(float));
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( threadadd), dim3(1),dim3(n), 0, 0, dev_a, dev_b, dev_out, n);
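// note: a single block limits n to the device's maximum threads per block (typically 1024)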
// copy the array 'out' back from the GPU to the CPU
hipMemcpy(out, dev_out, n * sizeof(float), hipMemcpyDeviceToHost);
// free the memory allocated on the GPU
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_out);
return 0;
}
|
31f3110324a7e703dde3201774e45cec7137f138.cu
|
#include "cuda.h"
#include "stdio.h"
#include "stdlib.h"
__global__ void threadadd(float *a, float *b, float *out, int n) {
int tid = threadIdx.x; // this thread handles the data at its thread id
if (tid < n)
out[tid] = a[tid] + b[tid];
}
extern "C" int threadmain(float *a, float *b, float *out, int n) {
float *dev_a, *dev_b, *dev_out;
// allocate the memory on the GPU
cudaMalloc((void**)&dev_a, n*sizeof(float));
cudaMalloc((void**)&dev_b, n*sizeof(float));
cudaMalloc((void**)&dev_out, n*sizeof(float));
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice);
threadadd<<<1,n>>>(dev_a, dev_b, dev_out, n);
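// note: a single block limits n to the device's maximum threads per block (typically 1024)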
// copy the array 'out' back from the GPU to the CPU
cudaMemcpy(out, dev_out, n * sizeof(float), cudaMemcpyDeviceToHost);
// free the memory allocated on the GPU
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_out);
return 0;
}
|
c4cd34d85a64e2d10f7b3e6a6cb02869a052c381.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <dslash.h>
#include <worker.h>
#include <dslash_helper.cuh>
#include <color_spinor_field_order.h>
#include <gauge_field_order.h>
#include <color_spinor.h>
#include <dslash_helper.cuh>
#include <index_helper.cuh>
#include <gauge_field.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_staggered.cuh>
/**
This is a staggered Dirac operator
*/
namespace quda
{
template <typename Arg> class Staggered : public Dslash<staggered, Arg>
{
using Dslash = Dslash<staggered, Arg>;
using Dslash::arg;
using Dslash::in;
public:
Staggered(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash(arg, out, in) {}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash::setParam(tp);
Dslash::template instantiate<packStaggeredShmem>(tp, stream);
}
/*
per direction / dimension flops
SU(3) matrix-vector flops = (8 Nc - 2) * Nc
xpay = 2 * 2 * Nc * Ns
So for the full dslash we have
flops = (2 * 2 * Nd * (8*Nc-2) * Nc) + ((2 * 2 * Nd - 1) * 2 * Nc * Ns)
flops_xpay = flops + 2 * 2 * Nc * Ns
For Asqtad this should give 1146 for Nc=3,Ns=2 and 1158 for the axpy equivalent
*/
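/*
 Worked check: with Nd = 4, Nc = 3 and a staggered spinor (Nspin = 1),
 2 * 2 * 4 * (8*3-2) * 3 = 1056 and (2*2*4 - 1) * 2 * 3 * 1 = 90, giving
 1146 flops; the xpay variant adds 2 * 2 * 3 * 1 = 12 for a total of 1158.
 */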
long long flops() const
{
int mv_flops = (8 * in.Ncolor() - 2) * in.Ncolor(); // SU(3) matrix-vector flops
int ghost_flops = (3 + 1) * (mv_flops + 2 * in.Ncolor() * in.Nspin());
int xpay_flops = 2 * 2 * in.Ncolor() * in.Nspin(); // multiply and add per real component
int num_dir = 2 * 4; // hard code factor of 4 in direction since fields may be 5-d
long long flops_ = 0;
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: flops_ = ghost_flops * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL: {
long long ghost_sites = 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
flops_ = ghost_flops * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY: {
long long sites = in.Volume();
flops_ = (2 * num_dir * mv_flops + // SU(3) matrix-vector multiplies
(2 * num_dir - 1) * 2 * in.Ncolor() * in.Nspin())
* sites; // accumulation
if (arg.xpay) flops_ += xpay_flops * sites; // axpy is always on interior
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
flops_ -= ghost_flops * ghost_sites;
break;
}
}
return flops_;
}
long long bytes() const
{
int gauge_bytes_fat = QUDA_RECONSTRUCT_NO * in.Precision();
int gauge_bytes_long = arg.reconstruct * in.Precision();
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int spinor_bytes = 2 * in.Ncolor() * in.Nspin() * in.Precision() + (isFixed ? sizeof(float) : 0);
int ghost_bytes = 3 * (spinor_bytes + gauge_bytes_long) + (spinor_bytes + gauge_bytes_fat)
+ 3 * 2 * spinor_bytes; // last term is the accumulator load/store through the face
int num_dir = 2 * 4; // set to 4-d since we take care of 5-d fermions in derived classes where necessary
long long bytes_ = 0;
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: bytes_ = ghost_bytes * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL: {
long long ghost_sites = 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
bytes_ = ghost_bytes * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY: {
long long sites = in.Volume();
bytes_ = (num_dir * (gauge_bytes_fat + gauge_bytes_long) + // gauge reads
num_dir * 2 * spinor_bytes + // spinor reads
spinor_bytes)
* sites; // spinor write
if (arg.xpay) bytes_ += spinor_bytes;
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
bytes_ -= ghost_bytes * ghost_sites;
break;
}
}
return bytes_;
}
};
template <typename Float, int nColor, QudaReconstructType recon_l> struct ImprovedStaggeredApply {
inline ImprovedStaggeredApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &L,
const GaugeField &U, double a, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 4; // MWTODO: this probably should be 5 for mrhs Dslash
constexpr bool improved = true;
constexpr QudaReconstructType recon_u = QUDA_RECONSTRUCT_NO;
StaggeredArg<Float, nColor, nDim, recon_u, recon_l, improved> arg(out, in, U, L, a, x, parity, dagger,
comm_override);
Staggered<decltype(arg)> staggered(arg, out, in);
dslash::DslashPolicyTune<decltype(staggered)> policy(
staggered, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
in.GhostFaceCB(), profile);
policy.apply(0);
checkCudaError();
}
};
void ApplyImprovedStaggered(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const GaugeField &L, double a, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_STAGGERED_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U, L);
// check all locations match
checkLocation(out, in, U, L);
for (int i = 0; i < 4; i++) {
if (comm_dim_partitioned(i) && (U.X()[i] < 6)) {
errorQuda(
"ERROR: partitioned dimension with local size less than 6 is not supported in improved staggered dslash\n");
}
}
// L must be first gauge field argument since we template on long reconstruct
instantiate<ImprovedStaggeredApply, StaggeredReconstruct>(out, in, L, U, a, x, parity, dagger, comm_override,
profile);
#else
errorQuda("Staggered dslash has not been built");
#endif
}
} // namespace quda
|
c4cd34d85a64e2d10f7b3e6a6cb02869a052c381.cu
|
#include <dslash.h>
#include <worker.h>
#include <dslash_helper.cuh>
#include <color_spinor_field_order.h>
#include <gauge_field_order.h>
#include <color_spinor.h>
#include <dslash_helper.cuh>
#include <index_helper.cuh>
#include <gauge_field.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_staggered.cuh>
/**
This is a staggered Dirac operator
*/
namespace quda
{
template <typename Arg> class Staggered : public Dslash<staggered, Arg>
{
using Dslash = Dslash<staggered, Arg>;
using Dslash::arg;
using Dslash::in;
public:
Staggered(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) : Dslash(arg, out, in) {}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash::setParam(tp);
Dslash::template instantiate<packStaggeredShmem>(tp, stream);
}
/*
per direction / dimension flops
SU(3) matrix-vector flops = (8 Nc - 2) * Nc
xpay = 2 * 2 * Nc * Ns
So for the full dslash we have
flops = (2 * 2 * Nd * (8*Nc-2) * Nc) + ((2 * 2 * Nd - 1) * 2 * Nc * Ns)
flops_xpay = flops + 2 * 2 * Nc * Ns
For Asqtad this should give 1146 for Nc=3,Ns=2 and 1158 for the axpy equivalent
*/
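/*
 Worked check: with Nd = 4, Nc = 3 and a staggered spinor (Nspin = 1),
 2 * 2 * 4 * (8*3-2) * 3 = 1056 and (2*2*4 - 1) * 2 * 3 * 1 = 90, giving
 1146 flops; the xpay variant adds 2 * 2 * 3 * 1 = 12 for a total of 1158.
 */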
long long flops() const
{
int mv_flops = (8 * in.Ncolor() - 2) * in.Ncolor(); // SU(3) matrix-vector flops
int ghost_flops = (3 + 1) * (mv_flops + 2 * in.Ncolor() * in.Nspin());
int xpay_flops = 2 * 2 * in.Ncolor() * in.Nspin(); // multiply and add per real component
int num_dir = 2 * 4; // hard code factor of 4 in direction since fields may be 5-d
long long flops_ = 0;
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: flops_ = ghost_flops * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL: {
long long ghost_sites = 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
flops_ = ghost_flops * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY: {
long long sites = in.Volume();
flops_ = (2 * num_dir * mv_flops + // SU(3) matrix-vector multiplies
(2 * num_dir - 1) * 2 * in.Ncolor() * in.Nspin())
* sites; // accumulation
if (arg.xpay) flops_ += xpay_flops * sites; // axpy is always on interior
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
flops_ -= ghost_flops * ghost_sites;
break;
}
}
return flops_;
}
long long bytes() const
{
int gauge_bytes_fat = QUDA_RECONSTRUCT_NO * in.Precision();
int gauge_bytes_long = arg.reconstruct * in.Precision();
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int spinor_bytes = 2 * in.Ncolor() * in.Nspin() * in.Precision() + (isFixed ? sizeof(float) : 0);
int ghost_bytes = 3 * (spinor_bytes + gauge_bytes_long) + (spinor_bytes + gauge_bytes_fat)
+ 3 * 2 * spinor_bytes; // last term is the accumulator load/store through the face
int num_dir = 2 * 4; // set to 4-d since we take care of 5-d fermions in derived classes where necessary
long long bytes_ = 0;
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: bytes_ = ghost_bytes * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL: {
long long ghost_sites = 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
bytes_ = ghost_bytes * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY: {
long long sites = in.Volume();
bytes_ = (num_dir * (gauge_bytes_fat + gauge_bytes_long) + // gauge reads
num_dir * 2 * spinor_bytes + // spinor reads
spinor_bytes)
* sites; // spinor write
if (arg.xpay) bytes_ += spinor_bytes;
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
bytes_ -= ghost_bytes * ghost_sites;
break;
}
}
return bytes_;
}
};
template <typename Float, int nColor, QudaReconstructType recon_l> struct ImprovedStaggeredApply {
inline ImprovedStaggeredApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &L,
const GaugeField &U, double a, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 4; // MWTODO: this probably should be 5 for mrhs Dslash
constexpr bool improved = true;
constexpr QudaReconstructType recon_u = QUDA_RECONSTRUCT_NO;
StaggeredArg<Float, nColor, nDim, recon_u, recon_l, improved> arg(out, in, U, L, a, x, parity, dagger,
comm_override);
Staggered<decltype(arg)> staggered(arg, out, in);
dslash::DslashPolicyTune<decltype(staggered)> policy(
staggered, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
in.GhostFaceCB(), profile);
policy.apply(0);
checkCudaError();
}
};
void ApplyImprovedStaggered(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
const GaugeField &L, double a, const ColorSpinorField &x, int parity, bool dagger,
const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_STAGGERED_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U, L);
// check all locations match
checkLocation(out, in, U, L);
for (int i = 0; i < 4; i++) {
if (comm_dim_partitioned(i) && (U.X()[i] < 6)) {
errorQuda(
"ERROR: partitioned dimension with local size less than 6 is not supported in improved staggered dslash\n");
}
}
// L must be first gauge field argument since we template on long reconstruct
instantiate<ImprovedStaggeredApply, StaggeredReconstruct>(out, in, L, U, a, x, parity, dagger, comm_override,
profile);
#else
errorQuda("Staggered dslash has not been built");
#endif
}
} // namespace quda
|
e7c2a85f9ae2c8f022d7d767265f5751d714f954.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
__global__ void myfirstkernel(void) {
}
int main(void) {
myfirstkernel << <1, 1 >> >();
printf("Hello, CUDA!\n");
return 0;
}
|
e7c2a85f9ae2c8f022d7d767265f5751d714f954.cu
|
#include <iostream>
#include <stdio.h>
__global__ void myfirstkernel(void) {
}
int main(void) {
myfirstkernel << <1, 1 >> >();
printf("Hello, CUDA!\n");
return 0;
}
|
FillOnes.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void FillOnes(float *vec, int size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = 1.0f;
}
|
FillOnes.cu
|
#include "includes.h"
__global__ void FillOnes(float *vec, int size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = 1.0f;
}
|
657f3143f2e09a7fcd31a5384869d9344f2d88e6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
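/* e.g. INDX( 2, 3, SIZE ) expands to ( 3 * SIZE ) + 2, i.e. row 2 of column 3 in column-major storage */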
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
void host_dgemm( const int m, const int n, const int k,
double const * const a, double const * const b, double *c )
{
/*
* naive matrix multiplication loops go here. triply nested for loop
* C = A * B where A and B are matrices
* C(i,j) = SUM( A(i,k) * B(k,j), over the index "k", where 0 <= k < (SIZE-1) )
*/
/* insert code here */
for( int j = 0; j < n; j++ )
{
for( int i = 0; i < m; i++ )
{
for( int koff = 0; koff < k; koff++ )
{
c[INDX(i, j, m)] += a[INDX( i, koff, m )] * b[INDX( koff, j, n )];
} /* end for koff */
} /* end for i */
} /* end for j */
} /* end host_dgemm */
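/* note: the triple loop above performs 2*m*n*k flops, i.e. 2 * 1024^3 ~= 2.15e9
   for SIZE = 1024, which is the figure used for the GFlop/s report in main */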
int main( int argc, char *argv[] )
{
int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* declare host pointers */
double *h_a, *h_b, *h_cdef;
size_t numbytes = size * size * sizeof( double );
/* allocate host pointers */
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_cdef = (double *) malloc( numbytes );
if( h_cdef == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* set C to zero */
memset( h_cdef, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize A and B on the host */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* start timers */
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 );
/* call host dgemm */
host_dgemm( size, size, size, h_a, h_b, h_cdef );
/* stop the timers */
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, stop );
/* print the results */
fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* cleanup */
free( h_a );
free( h_b );
free( h_cdef );
hipError_t cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
|
657f3143f2e09a7fcd31a5384869d9344f2d88e6.cu
|
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
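/* e.g. INDX( 2, 3, SIZE ) expands to ( 3 * SIZE ) + 2, i.e. row 2 of column 3 in column-major storage */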
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
void host_dgemm( const int m, const int n, const int k,
double const * const a, double const * const b, double *c )
{
/*
* naive matrix multiplication loops go here. triply nested for loop
* C = A * B where A and B are matrices
* C(i,j) = SUM( A(i,k) * B(k,j), over the index "k", where 0 <= k < (SIZE-1) )
*/
/* insert code here */
for( int j = 0; j < n; j++ )
{
for( int i = 0; i < m; i++ )
{
for( int koff = 0; koff < k; koff++ )
{
c[INDX(i, j, m)] += a[INDX( i, koff, m )] * b[INDX( koff, j, n )];
} /* end for koff */
} /* end for i */
} /* end for j */
} /* end host_dgemm */
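/* note: the triple loop above performs 2*m*n*k flops, i.e. 2 * 1024^3 ~= 2.15e9
   for SIZE = 1024, which is the figure used for the GFlop/s report in main */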
int main( int argc, char *argv[] )
{
int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* declare host pointers */
double *h_a, *h_b, *h_cdef;
size_t numbytes = size * size * sizeof( double );
/* allocate host pointers */
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_cdef = (double *) malloc( numbytes );
if( h_cdef == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* set C to zero */
memset( h_cdef, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize A and B on the host */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* start timers */
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
/* call host dgemm */
host_dgemm( size, size, size, h_a, h_b, h_cdef );
/* stop the timers */
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
/* print the results */
fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* cleanup */
free( h_a );
free( h_b );
free( h_cdef );
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
ef7914c2abd9308428cb1c119346da736d192524.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <string>
#include <cassert>
#include <iostream>
#include <cstddef>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#define uint8_t unsigned char
#define uint16_t unsigned short
#define uint32_t unsigned int
#define uint64_t unsigned long long
using namespace std;
//
// Default helper functions for working with CUDA
//
#define CSC(call) do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while (0)
hipEvent_t start, stop;
float t;
void time_start() {
CSC(hipEventCreate(&start));
CSC(hipEventCreate(&stop));
CSC(hipEventRecord(start, 0));
}
void time_end() {
CSC(hipGetLastError());
CSC(hipEventRecord(stop, 0));
CSC(hipEventSynchronize(stop));
CSC(hipEventElapsedTime(&t, start, stop));
printf("time = %f\n", t);
CSC(hipEventDestroy(start));
CSC(hipEventDestroy(stop));
}
//
// main program
//
// __global__ void kernel(uchar4 *data, uint32_t w, uint32_t h, int cnum) {
// int idx = blockDim.x * blockIdx.x + threadIdx.x;
// int idy = blockDim.y * blockIdx.y + threadIdx.y;
// int offsetx = blockDim.x * gridDim.x;
// int offsety = blockDim.y * gridDim.y;
//
// for (int x = idx; x < w; x += offsetx) {
// for (int y = idy; y < h; y += offsety) {
// classify(p(x, y), cnum);
// }
// }
// }
//
#define get(m, x, y) m[y*n + x]
#define gett(m, x, y) get(m, y, x)
#define getl(m, x, y) gett(m, x, y)
#define getu(m, x, y) gett(m, x, y)
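// cmpr: absolute-value comparator used with thrust::max_element below to pick the partial-pivoting pivot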
struct cmpr {
__host__ __device__ bool operator()(double a, double b) {
return fabs(a) < fabs(b);
}
};
__global__ void k_swapcolumns(double *dm, const uint32_t n, const uint32_t fr, const uint32_t to) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offsetx = blockDim.x * gridDim.x;
double tmp = 0;
for (int i = idx; i < n; i += offsetx) {
tmp = get(dm, i, fr);
get(dm, i, fr) = get(dm, i, to);
get(dm, i, to) = tmp;
}
}
__global__ void k_lucol(double *dm, const uint32_t n, const uint32_t i) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offsetx = blockDim.x * gridDim.x;
<<<<<<< HEAD
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int offsety = blockDim.y * gridDim.y;
double d = get(dlu, i, i);
for (int k = idy+i+1; k < n; k += offsety) {
for(int j = idx+i+1; j < n; j += offsetx) {
getu(dlu, j, k) -= getl(dlu, j, i) / d * gett(dlu, i, k);
=======
for(int j = idx; j < n; j += offsetx) {
if (j <= i) continue;
for (int k = 0; k < i; ++k) {
getu(dm, i, j) -= getl(dm, i, k) * getu(dm, k, j);
}
for (int k = 0; k < i; ++k) {
getl(dm, j, i) -= getl(dm, j, k) * getu(dm, k, i);
>>>>>>> parent of a5cf176... added PGP and MM reports
}
getl(dm, j, i) /= get(dm, i, i);
}
}
int main() {
cout.precision(10);
cout.setf(ios::scientific);
int n;
cin >> n;
int size = n * n;
double hm[size];
int swaps[n];
for (int i = 0; i < n; ++i) {
swaps[i] = i;
for (int j = 0; j < n; ++j) {
cin >> gett(hm, i, j); // transpose of original matrix
}
}
double *dm = NULL;
CSC(hipMalloc(&dm, sizeof(double) * size));
CSC(hipMemcpy(dm, hm, sizeof(double) * size, hipMemcpyHostToDevice));
for (int r = 0; r < n-1; ++r) {
// find a max in row(columns) using thrust
thrust::device_ptr<double> dp = thrust::device_pointer_cast(dm + r*n + r);
thrust::device_ptr<double> mp = thrust::max_element(dp, dp + n - r, cmpr());
<<<<<<< HEAD
int to = r + mp - dp, t;
t = to;
while (swaps[t] != -1) t = swaps[t];
swaps[t] = r;
hipLaunchKernelGGL(( k_swapcolumns), dim3(256), dim3(256), 0, 0, dlu, n, r, to); CSC(hipGetLastError());
hipLaunchKernelGGL(( k_lucol), dim3(dim3(16, 16)), dim3(dim3(16, 16)), 0, 0, dlu, n, r); CSC(hipGetLastError());
// calc L
//for (int i = r+1; i < n; ++i) gett(hm, r, i) /= gett(hm, r, r);
=======
int to = r + mp - dp, tmp;
tmp = swaps[to], swaps[to] = swaps[r], swaps[r] = tmp;
hipLaunchKernelGGL(( k_swapcolumns), dim3(256), dim3(256), 0, 0, dm, n, r, to);
hipLaunchKernelGGL(( k_lucol), dim3(256), dim3(256), 0, 0, dm, n, r);
>>>>>>> parent of a5cf176... added PGP and MM reports
}
CSC(hipMemcpy(hm, dm, sizeof(double) * size, hipMemcpyDeviceToHost));
CSC(hipFree(dm));
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
cout << gett(hm, i, j) << ' ';
}
cout << endl;
}
for (int i = 0; i < n; ++i) {
cout << swaps[i] << ' ';
}
cout << endl;
}
|
ef7914c2abd9308428cb1c119346da736d192524.cu
|
#include <cstdio>
#include <string>
#include <cassert>
#include <iostream>
#include <cstddef>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#define uint8_t unsigned char
#define uint16_t unsigned short
#define uint32_t unsigned int
#define uint64_t unsigned long long
using namespace std;
//
// Default helper functions for working with CUDA
//
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
cudaEvent_t start, stop;
float t;
void time_start() {
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&stop));
CSC(cudaEventRecord(start, 0));
}
void time_end() {
CSC(cudaGetLastError());
CSC(cudaEventRecord(stop, 0));
CSC(cudaEventSynchronize(stop));
CSC(cudaEventElapsedTime(&t, start, stop));
printf("time = %f\n", t);
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(stop));
}
//
// main program
//
// __global__ void kernel(uchar4 *data, uint32_t w, uint32_t h, int cnum) {
// int idx = blockDim.x * blockIdx.x + threadIdx.x;
// int idy = blockDim.y * blockIdx.y + threadIdx.y;
// int offsetx = blockDim.x * gridDim.x;
// int offsety = blockDim.y * gridDim.y;
//
// for (int x = idx; x < w; x += offsetx) {
// for (int y = idy; y < h; y += offsety) {
// classify(p(x, y), cnum);
// }
// }
// }
//
#define get(m, x, y) m[y*n + x]
#define gett(m, x, y) get(m, y, x)
#define getl(m, x, y) gett(m, x, y)
#define getu(m, x, y) gett(m, x, y)
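// cmpr: absolute-value comparator used with thrust::max_element below to pick the partial-pivoting pivot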
struct cmpr {
__host__ __device__ bool operator()(double a, double b) {
return fabs(a) < fabs(b);
}
};
__global__ void k_swapcolumns(double *dm, const uint32_t n, const uint32_t fr, const uint32_t to) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offsetx = blockDim.x * gridDim.x;
double tmp = 0;
for (int i = idx; i < n; i += offsetx) {
tmp = get(dm, i, fr);
get(dm, i, fr) = get(dm, i, to);
get(dm, i, to) = tmp;
}
}
__global__ void k_lucol(double *dm, const uint32_t n, const uint32_t i) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offsetx = blockDim.x * gridDim.x;
<<<<<<< HEAD
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int offsety = blockDim.y * gridDim.y;
double d = get(dlu, i, i);
for (int k = idy+i+1; k < n; k += offsety) {
for(int j = idx+i+1; j < n; j += offsetx) {
getu(dlu, j, k) -= getl(dlu, j, i) / d * gett(dlu, i, k);
=======
for(int j = idx; j < n; j += offsetx) {
if (j <= i) continue;
for (int k = 0; k < i; ++k) {
getu(dm, i, j) -= getl(dm, i, k) * getu(dm, k, j);
}
for (int k = 0; k < i; ++k) {
getl(dm, j, i) -= getl(dm, j, k) * getu(dm, k, i);
>>>>>>> parent of a5cf176... added PGP and MM reports
}
getl(dm, j, i) /= get(dm, i, i);
}
}
int main() {
cout.precision(10);
cout.setf(ios::scientific);
int n;
cin >> n;
int size = n * n;
double hm[size];
int swaps[n];
for (int i = 0; i < n; ++i) {
swaps[i] = i;
for (int j = 0; j < n; ++j) {
cin >> gett(hm, i, j); // transpose of original matrix
}
}
double *dm = NULL;
CSC(cudaMalloc(&dm, sizeof(double) * size));
CSC(cudaMemcpy(dm, hm, sizeof(double) * size, cudaMemcpyHostToDevice));
for (int r = 0; r < n-1; ++r) {
// find a max in row(columns) using thrust
thrust::device_ptr<double> dp = thrust::device_pointer_cast(dm + r*n + r);
thrust::device_ptr<double> mp = thrust::max_element(dp, dp + n - r, cmpr());
<<<<<<< HEAD
int to = r + mp - dp, t;
t = to;
while (swaps[t] != -1) t = swaps[t];
swaps[t] = r;
k_swapcolumns<<<256, 256>>>(dlu, n, r, to); CSC(cudaGetLastError());
k_lucol<<<dim3(16, 16), dim3(16, 16)>>>(dlu, n, r); CSC(cudaGetLastError());
// calc L
//for (int i = r+1; i < n; ++i) gett(hm, r, i) /= gett(hm, r, r);
=======
int to = r + mp - dp, tmp;
tmp = swaps[to], swaps[to] = swaps[r], swaps[r] = tmp;
k_swapcolumns<<<256, 256>>>(dm, n, r, to);
k_lucol<<<256, 256>>>(dm, n, r);
>>>>>>> parent of a5cf176... added PGP and MM reports
}
CSC(cudaMemcpy(hm, dm, sizeof(double) * size, cudaMemcpyDeviceToHost));
CSC(cudaFree(dm));
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
cout << gett(hm, i, j) << ' ';
}
cout << endl;
}
for (int i = 0; i < n; ++i) {
cout << swaps[i] << ' ';
}
cout << endl;
}
|
2adfa42d20874220d17f8e4c31059cd69fdf0ca7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2018, NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <cuda/std/cstddef>
#include <cuda/std/cstdint>
#include <cuda/std/atomic>
template<class T> static constexpr T minimum(T a, T b) { return a < b ? a : b; }
struct trie {
struct ref {
cuda::atomic<trie*, cuda::thread_scope_device> ptr = ATOMIC_VAR_INIT(nullptr);
// the flag will protect against multiple pointer updates
cuda::std::atomic_flag flag = ATOMIC_FLAG_INIT;
} next[26];
cuda::std::atomic<short> count = ATOMIC_VAR_INIT(0);
};
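/*
 * Child creation protocol: the thread whose test_and_set on 'flag' returns
 * false allocates the child node from the bump allocator and publishes it
 * through 'ptr' (store-release followed by notify_all); every other thread
 * waits on 'ptr' until it observes a non-null value.
 */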
__host__ __device__
int index_of(char c) {
if(c >= 'a' && c <= 'z') return c - 'a';
if(c >= 'A' && c <= 'Z') return c - 'A';
return -1;
};
__host__ __device__
void make_trie(/* trie to insert word counts into */ trie& root,
/* bump allocator to get new nodes*/ cuda::std::atomic<trie*>& bump,
/* input */ const char* begin, const char* end,
/* thread this invocation is for */ unsigned index,
/* how many threads there are */ unsigned domain) {
auto const size = end - begin;
auto const stride = (size / domain + 1);
auto off = minimum(size, stride * index);
auto const last = minimum(size, off + stride);
for(char c = begin[off]; off < size && off != last && c != 0 && index_of(c) != -1; ++off, c = begin[off]);
for(char c = begin[off]; off < size && off != last && c != 0 && index_of(c) == -1; ++off, c = begin[off]);
trie *n = &root;
for(char c = begin[off]; ; ++off, c = begin[off]) {
auto const index = off >= size ? -1 : index_of(c);
if(index == -1) {
if(n != &root) {
n->count.fetch_add(1, cuda::std::memory_order_relaxed);
n = &root;
}
//end of last word?
if(off >= size || off > last)
break;
else
continue;
}
if(n->next[index].ptr.load(cuda::memory_order_acquire) == nullptr) {
if(n->next[index].flag.test_and_set(cuda::std::memory_order_relaxed))
n->next[index].ptr.wait(nullptr, cuda::std::memory_order_acquire);
else {
auto next = bump.fetch_add(1, cuda::std::memory_order_relaxed);
n->next[index].ptr.store(next, cuda::std::memory_order_release);
n->next[index].ptr.notify_all();
}
}
n = n->next[index].ptr.load(cuda::std::memory_order_relaxed);
}
}
__global__ // __launch_bounds__(1024, 1)
void call_make_trie(trie* t, cuda::std::atomic<trie*>* bump, const char* begin, const char* end) {
auto const index = blockDim.x * blockIdx.x + threadIdx.x;
auto const domain = gridDim.x * blockDim.x;
make_trie(*t, *bump, begin, end, index, domain);
}
__global__ void do_nothing() { }
#include <iostream>
#include <cassert>
#include <fstream>
#include <utility>
#include <chrono>
#include <thread>
#include <memory>
#include <vector>
#include <string>
#define check(ans) { assert_((ans), __FILE__, __LINE__); }
inline void assert_(hipError_t code, const char *file, int line) {
if (code == hipSuccess) return;
std::cerr << "check failed: " << hipGetErrorString(code) << " : " << file << '@' << line << std::endl;
abort();
}
template <class T>
struct managed_allocator {
typedef cuda::std::size_t size_type;
typedef cuda::std::ptrdiff_t difference_type;
typedef T value_type;
typedef T* pointer;// (deprecated in C++17)(removed in C++20) T*
typedef const T* const_pointer;// (deprecated in C++17)(removed in C++20) const T*
typedef T& reference;// (deprecated in C++17)(removed in C++20) T&
typedef const T& const_reference;// (deprecated in C++17)(removed in C++20) const T&
template< class U > struct rebind { typedef managed_allocator<U> other; };
managed_allocator() = default;
template <class U> constexpr managed_allocator(const managed_allocator<U>&) noexcept {}
T* allocate(std::size_t n) {
void* out = nullptr;
check(hipMallocManaged(&out, n*sizeof(T)));
return static_cast<T*>(out);
}
void deallocate(T* p, std::size_t) noexcept {
check(hipFree(p));
}
};
template<class T, class... Args>
T* make_(Args &&... args) {
managed_allocator<T> ma;
return new (ma.allocate(1)) T(std::forward<Args>(args)...);
}
template<class String>
void do_trie(String const& input, bool use_cuda, int blocks, int threads) {
std::vector<trie, managed_allocator<trie>> nodes(1<<17);
if(use_cuda) check(hipMemset(nodes.data(), 0, nodes.size()*sizeof(trie)));
auto t = nodes.data();
auto b = make_<cuda::std::atomic<trie*>>(nodes.data()+1);
auto const begin = std::chrono::steady_clock::now();
std::atomic_signal_fence(std::memory_order_seq_cst);
if(use_cuda) {
hipLaunchKernelGGL(( call_make_trie), dim3(blocks),dim3(threads), 0, 0, t, b, input.data(), input.data() + input.size());
check(hipDeviceSynchronize());
}
else {
assert(blocks == 1);
std::vector<std::thread> tv(threads);
for(auto count = threads; count; --count)
tv[count - 1] = std::thread([&, count]() {
make_trie(*t, *b, input.data(), input.data() + input.size(), count - 1, threads);
});
for(auto& t : tv)
t.join();
}
std::atomic_signal_fence(std::memory_order_seq_cst);
auto const end = std::chrono::steady_clock::now();
auto const time = std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count();
auto const count = b->load() - nodes.data();
std::cout << "Assembled " << count << " nodes on " << blocks << "x" << threads << " " << (use_cuda ? "cuda" : "cpu") << " threads in " << time << "ms." << std::endl;
}
int main() {
std::basic_string<char, std::char_traits<char>, managed_allocator<char>> input;
char const* files[] = {
"books/2600-0.txt", "books/2701-0.txt", "books/35-0.txt", "books/84-0.txt", "books/8800.txt",
"books/pg1727.txt", "books/pg55.txt", "books/pg6130.txt", "books/pg996.txt", "books/1342-0.txt"
};
for(auto* ptr : files) {
std::cout << ptr << std::endl;
auto const cur = input.size();
std::ifstream in(ptr);
if(in.fail()) {
std::cerr << "Failed to open file: " << ptr << std::endl;
return -1;
}
in.seekg(0, std::ios_base::end);
auto const pos = in.tellg();
input.resize(cur + pos);
in.seekg(0, std::ios_base::beg);
in.read((char*)input.data() + cur, pos);
}
do_trie(input, false, 1, 1);
do_trie(input, false, 1, 1);
do_trie(input, false, 1, std::thread::hardware_concurrency());
do_trie(input, false, 1, std::thread::hardware_concurrency());
assert(hipSuccess == hipSetDevice(0));
hipDeviceProp_t deviceProp;
assert(hipSuccess == hipGetDeviceProperties(&deviceProp, 0));
do_trie(input, true, deviceProp.multiProcessorCount * deviceProp.maxThreadsPerMultiProcessor >> 10, 1<<10);
do_trie(input, true, deviceProp.multiProcessorCount * deviceProp.maxThreadsPerMultiProcessor >> 10, 1<<10);
return 0;
}
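// --- Illustrative note (not part of the original sample) -------------------------
// This .hip file is the hipify translation of the .cu file that follows: cuda*
// runtime calls become their hip* counterparts and the <<<blocks,threads>>> launch
// becomes hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedMemBytes,
// stream, args...). A minimal standalone example of that mapping, using a
// hypothetical kernel name:
__global__ void scale_by_two_example(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= 2.0f;                             // trivial element-wise kernel
}
// CUDA launch:           scale_by_two_example<<<grid, block>>>(x, n);
// Equivalent HIP launch: hipLaunchKernelGGL(scale_by_two_example, dim3(grid), dim3(block), 0, 0, x, n);
// ----------------------------------------------------------------------------------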
|
2adfa42d20874220d17f8e4c31059cd69fdf0ca7.cu
|
/*
Copyright (c) 2018, NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <cuda/std/cstddef>
#include <cuda/std/cstdint>
#include <cuda/std/atomic>
template<class T> static constexpr T minimum(T a, T b) { return a < b ? a : b; }
struct trie {
struct ref {
cuda::atomic<trie*, cuda::thread_scope_device> ptr = ATOMIC_VAR_INIT(nullptr);
// the flag will protect against multiple pointer updates
cuda::std::atomic_flag flag = ATOMIC_FLAG_INIT;
} next[26];
cuda::std::atomic<short> count = ATOMIC_VAR_INIT(0);
};
__host__ __device__
int index_of(char c) {
if(c >= 'a' && c <= 'z') return c - 'a';
if(c >= 'A' && c <= 'Z') return c - 'A';
return -1;
};
__host__ __device__
void make_trie(/* trie to insert word counts into */ trie& root,
/* bump allocator to get new nodes*/ cuda::std::atomic<trie*>& bump,
/* input */ const char* begin, const char* end,
/* thread this invocation is for */ unsigned index,
/* how many threads there are */ unsigned domain) {
auto const size = end - begin;
auto const stride = (size / domain + 1);
auto off = minimum(size, stride * index);
auto const last = minimum(size, off + stride);
for(char c = begin[off]; off < size && off != last && c != 0 && index_of(c) != -1; ++off, c = begin[off]);
for(char c = begin[off]; off < size && off != last && c != 0 && index_of(c) == -1; ++off, c = begin[off]);
trie *n = &root;
for(char c = begin[off]; ; ++off, c = begin[off]) {
auto const index = off >= size ? -1 : index_of(c);
if(index == -1) {
if(n != &root) {
n->count.fetch_add(1, cuda::std::memory_order_relaxed);
n = &root;
}
//end of last word?
if(off >= size || off > last)
break;
else
continue;
}
if(n->next[index].ptr.load(cuda::memory_order_acquire) == nullptr) {
if(n->next[index].flag.test_and_set(cuda::std::memory_order_relaxed))
n->next[index].ptr.wait(nullptr, cuda::std::memory_order_acquire);
else {
auto next = bump.fetch_add(1, cuda::std::memory_order_relaxed);
n->next[index].ptr.store(next, cuda::std::memory_order_release);
n->next[index].ptr.notify_all();
}
}
n = n->next[index].ptr.load(cuda::std::memory_order_relaxed);
}
}
__global__ // __launch_bounds__(1024, 1)
void call_make_trie(trie* t, cuda::std::atomic<trie*>* bump, const char* begin, const char* end) {
auto const index = blockDim.x * blockIdx.x + threadIdx.x;
auto const domain = gridDim.x * blockDim.x;
make_trie(*t, *bump, begin, end, index, domain);
}
__global__ void do_nothing() { }
#include <iostream>
#include <cassert>
#include <fstream>
#include <utility>
#include <chrono>
#include <thread>
#include <memory>
#include <vector>
#include <string>
#define check(ans) { assert_((ans), __FILE__, __LINE__); }
inline void assert_(cudaError_t code, const char *file, int line) {
if (code == cudaSuccess) return;
std::cerr << "check failed: " << cudaGetErrorString(code) << " : " << file << '@' << line << std::endl;
abort();
}
template <class T>
struct managed_allocator {
typedef cuda::std::size_t size_type;
typedef cuda::std::ptrdiff_t difference_type;
typedef T value_type;
typedef T* pointer;// (deprecated in C++17)(removed in C++20) T*
typedef const T* const_pointer;// (deprecated in C++17)(removed in C++20) const T*
typedef T& reference;// (deprecated in C++17)(removed in C++20) T&
typedef const T& const_reference;// (deprecated in C++17)(removed in C++20) const T&
template< class U > struct rebind { typedef managed_allocator<U> other; };
managed_allocator() = default;
template <class U> constexpr managed_allocator(const managed_allocator<U>&) noexcept {}
T* allocate(std::size_t n) {
void* out = nullptr;
check(cudaMallocManaged(&out, n*sizeof(T)));
return static_cast<T*>(out);
}
void deallocate(T* p, std::size_t) noexcept {
check(cudaFree(p));
}
};
template<class T, class... Args>
T* make_(Args &&... args) {
managed_allocator<T> ma;
return new (ma.allocate(1)) T(std::forward<Args>(args)...);
}
template<class String>
void do_trie(String const& input, bool use_cuda, int blocks, int threads) {
std::vector<trie, managed_allocator<trie>> nodes(1<<17);
if(use_cuda) check(cudaMemset(nodes.data(), 0, nodes.size()*sizeof(trie)));
auto t = nodes.data();
auto b = make_<cuda::std::atomic<trie*>>(nodes.data()+1);
auto const begin = std::chrono::steady_clock::now();
std::atomic_signal_fence(std::memory_order_seq_cst);
if(use_cuda) {
call_make_trie<<<blocks,threads>>>(t, b, input.data(), input.data() + input.size());
check(cudaDeviceSynchronize());
}
else {
assert(blocks == 1);
std::vector<std::thread> tv(threads);
for(auto count = threads; count; --count)
tv[count - 1] = std::thread([&, count]() {
make_trie(*t, *b, input.data(), input.data() + input.size(), count - 1, threads);
});
for(auto& t : tv)
t.join();
}
std::atomic_signal_fence(std::memory_order_seq_cst);
auto const end = std::chrono::steady_clock::now();
auto const time = std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count();
auto const count = b->load() - nodes.data();
std::cout << "Assembled " << count << " nodes on " << blocks << "x" << threads << " " << (use_cuda ? "cuda" : "cpu") << " threads in " << time << "ms." << std::endl;
}
int main() {
std::basic_string<char, std::char_traits<char>, managed_allocator<char>> input;
char const* files[] = {
"books/2600-0.txt", "books/2701-0.txt", "books/35-0.txt", "books/84-0.txt", "books/8800.txt",
"books/pg1727.txt", "books/pg55.txt", "books/pg6130.txt", "books/pg996.txt", "books/1342-0.txt"
};
for(auto* ptr : files) {
std::cout << ptr << std::endl;
auto const cur = input.size();
std::ifstream in(ptr);
if(in.fail()) {
std::cerr << "Failed to open file: " << ptr << std::endl;
return -1;
}
in.seekg(0, std::ios_base::end);
auto const pos = in.tellg();
input.resize(cur + pos);
in.seekg(0, std::ios_base::beg);
in.read((char*)input.data() + cur, pos);
}
do_trie(input, false, 1, 1);
do_trie(input, false, 1, 1);
do_trie(input, false, 1, std::thread::hardware_concurrency());
do_trie(input, false, 1, std::thread::hardware_concurrency());
assert(cudaSuccess == cudaSetDevice(0));
cudaDeviceProp deviceProp;
assert(cudaSuccess == cudaGetDeviceProperties(&deviceProp, 0));
do_trie(input, true, deviceProp.multiProcessorCount * deviceProp.maxThreadsPerMultiProcessor >> 10, 1<<10);
do_trie(input, true, deviceProp.multiProcessorCount * deviceProp.maxThreadsPerMultiProcessor >> 10, 1<<10);
return 0;
}
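// --- Illustrative sketch (not part of the original sample) -----------------------
// The insertion loop above uses a "claim, then publish" protocol per child slot:
// the first thread to win flag.test_and_set() allocates the node and publishes it
// with a release store plus notify_all(); every other thread wait()s until the
// pointer becomes non-null. A host-only C++20 sketch of the same idea with plain
// std::atomic (names below are hypothetical, chosen only for illustration):
#include <atomic>

struct example_slot {
    std::atomic<int*> ptr{nullptr};
    std::atomic_flag  claimed = ATOMIC_FLAG_INIT;
};

inline int* example_get_or_create(example_slot& s) {
    int* p = s.ptr.load(std::memory_order_acquire);
    if (p != nullptr) return p;                          // already published
    if (s.claimed.test_and_set(std::memory_order_relaxed)) {
        s.ptr.wait(nullptr, std::memory_order_acquire);  // another thread is allocating
        return s.ptr.load(std::memory_order_relaxed);
    }
    p = new int(0);                                      // we won the claim; allocate
    s.ptr.store(p, std::memory_order_release);           // publish the node
    s.ptr.notify_all();                                  // wake any waiters
    return p;
}
// ----------------------------------------------------------------------------------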
|
34957ea262e0d9cac24791f110559fef9a546eb4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
* Revised by Loko Kung, 2018
*/
#include "blur.cuh"
#include <cstdio>
#include <hip/hip_runtime.h>
#include "cuda_header.cuh"
CUDA_CALLABLE
void cuda_blur_kernel_convolution(uint thread_index, const float* gpu_raw_data,
const float* gpu_blur_v, float* gpu_out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// TODO: Implement the necessary convolution function that should be
// completed for each thread_index. Use the CPU implementation in
// blur.cpp as a reference.
}
__global__
void cuda_blur_kernel(const float *gpu_raw_data, const float *gpu_blur_v,
float *gpu_out_data, int n_frames, int blur_v_size) {
// TODO: Compute the current thread index.
uint thread_index;
// TODO: Update the while loop to handle all indices for this thread.
// Remember to advance the index as necessary.
while (false) {
// Do computation for this thread index
cuda_blur_kernel_convolution(thread_index, gpu_raw_data,
gpu_blur_v, gpu_out_data,
n_frames, blur_v_size);
// TODO: Update the thread index
}
}
float cuda_call_blur_kernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// Use the CUDA machinery for recording time
hipEvent_t start_gpu, stop_gpu;
float time_milli = -1;
hipEventCreate(&start_gpu);
hipEventCreate(&stop_gpu);
hipEventRecord(start_gpu);
// TODO: Allocate GPU memory for the raw input data (either audio file
    //       data or randomly generated data). The data is of type float and
// has n_frames elements. Then copy the data in raw_data into the
// GPU memory you allocated.
float* gpu_raw_data;
// TODO: Allocate GPU memory for the impulse signal (for now global GPU
    //       memory is fine). The data is of type float and has blur_v_size
// elements. Then copy the data in blur_v into the GPU memory you
// allocated.
float* gpu_blur_v;
// TODO: Allocate GPU memory to store the output audio signal after the
// convolution. The data is of type float and has n_frames elements.
// Initialize the data as necessary.
float* gpu_out_data;
// TODO: Appropriately call the kernel function.
// Check for errors on kernel call
hipError_t err = hipGetLastError();
if (hipSuccess != err)
fprintf(stderr, "Error %s\n", hipGetErrorString(err));
else
fprintf(stderr, "No kernel error detected\n");
// TODO: Now that kernel calls have finished, copy the output signal
// back from the GPU to host memory. (We store this channel's result
// in out_data on the host.)
// TODO: Now that we have finished our computations on the GPU, free the
// GPU resources.
// Stop the recording timer and return the computation time
hipEventRecord(stop_gpu);
hipEventSynchronize(stop_gpu);
hipEventElapsedTime(&time_milli, start_gpu, stop_gpu);
return time_milli;
}
|
34957ea262e0d9cac24791f110559fef9a546eb4.cu
|
/*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
* Revised by Loko Kung, 2018
*/
#include "blur.cuh"
#include <cstdio>
#include <cuda_runtime.h>
#include "cuda_header.cuh"
CUDA_CALLABLE
void cuda_blur_kernel_convolution(uint thread_index, const float* gpu_raw_data,
const float* gpu_blur_v, float* gpu_out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// TODO: Implement the necessary convolution function that should be
// completed for each thread_index. Use the CPU implementation in
// blur.cpp as a reference.
}
__global__
void cuda_blur_kernel(const float *gpu_raw_data, const float *gpu_blur_v,
float *gpu_out_data, int n_frames, int blur_v_size) {
// TODO: Compute the current thread index.
uint thread_index;
// TODO: Update the while loop to handle all indices for this thread.
// Remember to advance the index as necessary.
while (false) {
// Do computation for this thread index
cuda_blur_kernel_convolution(thread_index, gpu_raw_data,
gpu_blur_v, gpu_out_data,
n_frames, blur_v_size);
// TODO: Update the thread index
}
}
float cuda_call_blur_kernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
// Use the CUDA machinery for recording time
cudaEvent_t start_gpu, stop_gpu;
float time_milli = -1;
cudaEventCreate(&start_gpu);
cudaEventCreate(&stop_gpu);
cudaEventRecord(start_gpu);
// TODO: Allocate GPU memory for the raw input data (either audio file
    //       data or randomly generated data). The data is of type float and
// has n_frames elements. Then copy the data in raw_data into the
// GPU memory you allocated.
float* gpu_raw_data;
// TODO: Allocate GPU memory for the impulse signal (for now global GPU
    //       memory is fine). The data is of type float and has blur_v_size
// elements. Then copy the data in blur_v into the GPU memory you
// allocated.
float* gpu_blur_v;
// TODO: Allocate GPU memory to store the output audio signal after the
// convolution. The data is of type float and has n_frames elements.
// Initialize the data as necessary.
float* gpu_out_data;
// TODO: Appropriately call the kernel function.
// Check for errors on kernel call
cudaError err = cudaGetLastError();
if (cudaSuccess != err)
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
else
fprintf(stderr, "No kernel error detected\n");
// TODO: Now that kernel calls have finished, copy the output signal
// back from the GPU to host memory. (We store this channel's result
// in out_data on the host.)
// TODO: Now that we have finished our computations on the GPU, free the
// GPU resources.
// Stop the recording timer and return the computation time
cudaEventRecord(stop_gpu);
cudaEventSynchronize(stop_gpu);
cudaEventElapsedTime(&time_milli, start_gpu, stop_gpu);
return time_milli;
}
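// --- Illustrative sketch (not part of the original assignment skeleton) ----------
// One way the TODOs above could be filled in: each thread walks a grid-stride loop
// and computes one output sample of the causal convolution
//   out[i] = sum over j in [0, min(i, blur_v_size-1)] of raw[i-j] * blur_v[j].
// Names below are hypothetical and this is an assumed solution sketch, not the
// course's reference implementation.
__device__ void example_blur_convolution(unsigned int i, const float* raw,
                                         const float* blur_v, float* out,
                                         unsigned int n_frames,
                                         unsigned int blur_v_size) {
    float acc = 0.0f;
    unsigned int limit = (i < blur_v_size) ? i + 1 : blur_v_size;
    for (unsigned int j = 0; j < limit; ++j)
        acc += raw[i - j] * blur_v[j];                  // causal FIR accumulation
    out[i] = acc;
}
__global__ void example_blur_kernel(const float* raw, const float* blur_v,
                                    float* out, int n_frames, int blur_v_size) {
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < (unsigned int)n_frames;
         i += gridDim.x * blockDim.x)                   // grid-stride loop
        example_blur_convolution(i, raw, blur_v, out, n_frames, blur_v_size);
}
// Assumed host-side pattern for the allocation/copy TODOs:
//   cudaMalloc(&gpu_raw_data, n_frames * sizeof(float));
//   cudaMemcpy(gpu_raw_data, raw_data, n_frames * sizeof(float), cudaMemcpyHostToDevice);
//   example_blur_kernel<<<blocks, threads_per_block>>>(gpu_raw_data, gpu_blur_v,
//                                                      gpu_out_data, n_frames, blur_v_size);
//   cudaMemcpy(out_data, gpu_out_data, n_frames * sizeof(float), cudaMemcpyDeviceToHost);
//   cudaFree(gpu_raw_data); cudaFree(gpu_blur_v); cudaFree(gpu_out_data);
// ----------------------------------------------------------------------------------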
|
1d79303765ac09705adc8bae8d325733f799e898.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "custom_cuda.h"
#define DEBUG 0
bool ERROR_CHECK(hipError_t Status, string file, int line)
{
if(Status != hipSuccess)
{
printf("(EE) \n");
printf("(EE) Error detected in the LDPC decoder (%s : %d)\n", file.c_str(), line);
printf("(EE) MSG: %s\n", hipGetErrorString(Status));
printf("(EE) \n");
exit(0);
return false;
}
return true;
}
char* FilenamePtr(const char* filename){
char* fname = (char*)filename;
char* ptr = fname;
while( *fname != 0 ){
if( *fname == '\\' ) ptr = fname + 1;
if( *fname == '/' ) ptr = fname + 1;
fname += 1;
}
return ptr;
}
void CUDA_MALLOC_HOST(float** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(float);
Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(int** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(int);
Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(unsigned int** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(unsigned int);
Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
static size_t aDevice = 0;
void CUDA_MALLOC_HOST(char** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(char);
Status = hipHostMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(signed char** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(signed char);
Status = hipHostMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(float** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(float);
Status = hipMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(int** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(int);
Status = hipMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(unsigned int** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(unsigned int);
Status = hipMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(char** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(char);
Status = hipMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(signed char** ptr, size_t nbElements, const char * file, int line){
hipError_t Status;
size_t nbytes = nbElements * sizeof(signed char);
Status = hipMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
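// --- Illustrative sketch (not part of the original file) -------------------------
// Typical intended use of the helpers above: allocate pinned host memory plus a
// device buffer, then copy between them. The call site below is hypothetical and
// assumes the declarations from custom_cuda.h:
static void example_transfer(size_t n)
{
    float* h_buf = nullptr;
    float* d_buf = nullptr;
    CUDA_MALLOC_HOST (&h_buf, n, __FILE__, __LINE__);   // pinned host memory (hipHostMalloc)
    CUDA_MALLOC_DEVICE(&d_buf, n, __FILE__, __LINE__);  // device memory (hipMalloc)
    ERROR_CHECK(hipMemcpy(d_buf, h_buf, n * sizeof(float), hipMemcpyHostToDevice),
                __FILE__, __LINE__);
    hipFree(d_buf);
    hipHostFree(h_buf);
}
// ----------------------------------------------------------------------------------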
|
1d79303765ac09705adc8bae8d325733f799e898.cu
|
#include "custom_cuda.h"
#define DEBUG 0
bool ERROR_CHECK(cudaError_t Status, string file, int line)
{
if(Status != cudaSuccess)
{
printf("(EE) \n");
printf("(EE) Error detected in the LDPC decoder (%s : %d)\n", file.c_str(), line);
printf("(EE) MSG: %s\n", cudaGetErrorString(Status));
printf("(EE) \n");
exit(0);
return false;
}
return true;
}
char* FilenamePtr(const char* filename){
char* fname = (char*)filename;
char* ptr = fname;
while( *fname != 0 ){
if( *fname == '\\' ) ptr = fname + 1;
if( *fname == '/' ) ptr = fname + 1;
fname += 1;
}
return ptr;
}
void CUDA_MALLOC_HOST(float** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(float);
Status = cudaMallocHost(ptr, nbytes);
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(int** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(int);
Status = cudaMallocHost(ptr, nbytes);
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(unsigned int** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(unsigned int);
Status = cudaMallocHost(ptr, nbytes);
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
static size_t aDevice = 0;
void CUDA_MALLOC_HOST(char** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(char);
Status = cudaMallocHost(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_HOST(signed char** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(signed char);
Status = cudaMallocHost(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(float** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(float);
Status = cudaMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(int** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(int);
Status = cudaMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(unsigned int** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(unsigned int);
Status = cudaMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(char** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(char);
Status = cudaMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
void CUDA_MALLOC_DEVICE(signed char** ptr, size_t nbElements, const char * file, int line){
cudaError_t Status;
size_t nbytes = nbElements * sizeof(signed char);
Status = cudaMalloc(ptr, nbytes);
aDevice += nbytes;
#if DEBUG == 1
printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
// printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
ERROR_CHECK(Status, __FILE__, __LINE__);
}
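// --- Illustrative sketch (not part of the original file) -------------------------
// The overloads above differ only in the element type; a single function template
// could express the same allocation-with-diagnostics once. This is a suggested
// refactor sketch under that assumption, not code from the original project:
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

template <typename T>
void cuda_malloc_device_t(T** ptr, size_t nbElements, const char* file, int line)
{
    const size_t nbytes = nbElements * sizeof(T);
    const cudaError_t status = cudaMalloc(reinterpret_cast<void**>(ptr), nbytes);
    if (status != cudaSuccess) {
        printf("(EE) cudaMalloc of %zu bytes failed (%s : %d): %s\n",
               nbytes, file, line, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}
// Hypothetical call site:
//   float* d_llr = nullptr;
//   cuda_malloc_device_t(&d_llr, 1024, __FILE__, __LINE__);
// ----------------------------------------------------------------------------------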
|
3afe6cb8b6b574263d150be6e303e9b43438ccfc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2015 Davis E. King ([email protected])
// License: Boost Software License See LICENSE.txt for the full license.
#include "cuda_utils.h"
#include "cuda_dlib.h"
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
void set_device (
int dev
)
{
CHECK_CUDA(hipSetDevice(dev));
}
int get_device (
)
{
int dev = 0;
CHECK_CUDA(hipGetDevice(&dev));
return dev;
}
int get_num_devices (
)
{
int num_devices;
CHECK_CUDA(hipGetDeviceCount(&num_devices));
return num_devices;
}
bool can_access_peer (int device_id, int peer_device_id)
{
int can_access;
CHECK_CUDA(hipDeviceCanAccessPeer(&can_access, device_id, peer_device_id));
return can_access;
}
bool can_access_peer (const tensor& device, const tensor& peer_device)
{
return can_access_peer(device.device_id(), peer_device.device_id());
}
void device_synchronize (int dev)
{
raii_set_device set_dev(dev);
CHECK_CUDA(hipDeviceSynchronize());
}
void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); }
enable_peer_access::
enable_peer_access(
int device_id,
int peer_device_id
) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id)
{
raii_set_device set_dev(device_id);
auto err = hipDeviceEnablePeerAccess(peer_device_id, 0);
if (err == hipSuccess)
{
call_disable = true;
}
else if (err == hipErrorPeerAccessAlreadyEnabled)
{
// call hipGetLastError() to dispose of this error since we don't
// care.
auto err2 = hipGetLastError();
if (err2 != hipErrorPeerAccessAlreadyEnabled)
CHECK_CUDA(err2);
}
else
{
CHECK_CUDA(err);
}
}
enable_peer_access::
~enable_peer_access() noexcept(false)
{
if (call_disable)
{
raii_set_device set_dev(device_id);
CHECK_CUDA(hipDeviceDisablePeerAccess(peer_device_id));
}
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
__global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i]*s2[i];
}
}
__global__ void _cuda_multiply2(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = 0;
for (size_t j = i; j < max_size; j += n)
d[i] += s1[j%s1_n]*s2[j%s2_n];
}
}
__global__ void _cuda_multiply3(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i%s1_n]*s2[i%s2_n];
}
}
void multiply (
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
dest.nc() == src1.nc() && src1.nc() == src2.nc() ,"");
const long MD = ::max(::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
(src1.num_samples()==1 || src1.num_samples()==MD) &&
(src2.num_samples()==1 || src2.num_samples()==MD) ,"");
if (dest.size() == 0)
return;
const size_t max_size = ::max(::max(dest.size(),src1.size()),src2.size());
const auto d = dest.host();
const auto s1 = src1.host();
const auto s2 = src2.host();
if (dest.size() == src1.size() && src1.size() == src2.size())
{
launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
}
else if (dest.num_samples() == 1)
{
launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
dest.size(), src1.size(), src2.size(), max_size);
}
else
{
launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
dest.size(), src1.size(), src2.size());
}
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] = s1[i]*s2[k];
}
}
__global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// zero initialize d before we begin.
for (auto i : grid_stride_range(0, ks))
d[i] = 0;
__syncthreads();
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
void multiply_conv (
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (have_same_dimensions(dest,src1))
{
DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k(),"");
if (dest.size() == 0)
return;
launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
else
{
DLIB_CASSERT(have_same_dimensions(src1,src2),"");
DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k(),"");
if (dest.size() == 0)
return;
dim3 blocks(10,1);
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
hipLaunchKernelGGL(( _cuda_multiply_conv2), dim3(blocks),dim3(threads), 0, 0,
dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i]+s2[i];
}
}
__global__ void _cuda_add2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] = v1+v2;
}
}
void add (
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (dest.size() == 0)
return;
// Do the simple and fast version if everything has the same dimensions
if (have_same_dimensions(dest, src1) &&
have_same_dimensions(dest, src2))
{
launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
}
else
{
// Otherwise, do the more complex version with bounds checking.
launch_kernel(_cuda_add2,max_jobs(dest.size()),
dest.device(), src1.device(), src2.device(),
dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
src2.num_samples(), src2.k(), src2.nr(), src2.nc()
);
}
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s[i] + B;
}
}
__global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s[i];
}
}
void affine_transform(
tensor& dest,
const tensor& src,
const float A,
const float B
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
if (B != 0)
launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
else
launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
void affine_transform(
tensor& dest,
const tensor& src,
const float A
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i] + C;
}
}
__global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i];
}
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const float A,
const float B,
const float C
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
if (C != 0)
launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
else
launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const float A,
const float B
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale)
{
for (auto i : grid_stride_range(0, n))
{
d[i] += scale*s[i];
}
}
void add_scaled(
tensor& dest,
const float scale,
const tensor& src
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform5(
float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D
)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D;
}
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
const float A,
const float B,
const float C,
const float D
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
DLIB_CASSERT(dest.size()==src3.size(),"");
launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(),
src2.device(), src3.device(), dest.size(), A, B, C, D);
}
// -----------------------------------------------------------------------------------
__global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A[i]*s[i] + B[i];
}
}
__global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A[i%bs]*s[i] + B[i%bs];
}
}
void affine_transform(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
DLIB_CASSERT(have_same_dimensions(dest, src),"");
DLIB_CASSERT(
((A.num_samples()==1 && B.num_samples()==1) ||
(A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())) &&
A.nr()==B.nr() && B.nr()==src.nr() &&
A.nc()==B.nc() && B.nc()==src.nc() &&
A.k() ==B.k() && B.k()==src.k(),"");
if (A.num_samples() == 1)
{
launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size());
}
else
{
launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device());
}
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_compute_adam_update(
size_t n,
float* s,
float* m,
float* v,
const float alpha,
const float weight_decay,
const float momentum1,
const float momentum2,
const float* params,
const float* params_grad
)
{
const float eps = 1e-8;
// The loop is equivalent to doing this:
// m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad);
// v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad);
// s = -alpha*m/(sqrt(v) + eps);
for (auto i : grid_stride_range(0, n))
{
float g = (weight_decay*params[i] + params_grad[i]);
m[i] = momentum1*m[i] + (1-momentum1)*g;
v[i] = momentum2*v[i] + (1-momentum2)*g*g;
s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps);
}
}
void compute_adam_update (
tensor& s,
tensor& m,
tensor& v,
const float t,
const float learning_rate,
const float weight_decay,
const float momentum1,
const float momentum2,
const tensor& params,
const tensor& params_grad
)
{
DLIB_CASSERT(s.size() == m.size() &&
s.size() == v.size() &&
s.size() == params.size() &&
s.size() == params_grad.size(),"");
const float alpha = learning_rate*std::sqrt(1-::pow(momentum2,t))/(1-::pow(momentum1, t));
launch_kernel(_cuda_compute_adam_update,max_jobs(s.size()),
s.size(), s.device(), m.device(), v.device(), alpha, weight_decay,
momentum1, momentum2, params.device(), params_grad.device());
}
// -----------------------------------------------------------------------------------
__global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] = A[k]*s[i] + B[k];
}
}
void affine_transform_conv(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
DLIB_CASSERT(have_same_dimensions(dest, src),"");
DLIB_CASSERT(have_same_dimensions(A, B),"");
DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k(),"");
launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()),
dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k());
}
// -----------------------------------------------------------------------------------
__global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n)
{
for (auto i : grid_stride_range(0, n))
{
out[i] = in[i];
for (size_t j = i+n; j < total_n; j+=n)
out[i] += in[j];
}
}
void assign_bias_gradient (
tensor& grad,
const tensor& gradient_input
)
{
DLIB_CASSERT(
grad.num_samples() == 1 &&
gradient_input.k() == grad.k() &&
gradient_input.nr() == grad.nr() &&
gradient_input.nc() == grad.nc() &&
gradient_input.size() > 0,"");
launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size());
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
__global__ void _cuda_threshold(float* d, size_t n, float thresh)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = d[i]>thresh ? 1:0;
}
}
void threshold (
tensor& data,
float thresh
)
{
launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh);
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result)
{
// Parallel sum everything into local temp variables.
float temp = 0;
for(auto i : grid_stride_range(0, n))
temp += a[i]*b[i];
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*result, temp);
}
void dot (
const tensor& a,
const tensor& b,
tensor& result,
size_t idx
)
{
DLIB_CASSERT(a.size() == b.size(), "");
DLIB_CASSERT(idx < result.size(), "");
launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp)
{
const float p = *pp;
for (auto i : grid_stride_range(0, n))
{
if (s[i] > 0)
d[i] = s[i];
else
d[i] = p*s[i];
}
}
void prelu (
tensor& dest,
const tensor& src,
const tensor& param
)
{
launch_kernel(_cuda_prelu, max_jobs(dest.size()),
src.device(), dest.device(), src.size(), param.device());
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad)
{
const float p = *pp;
float pgrad = 0;
for(auto i : grid_stride_range(0, n))
{
if (s[i] > 0)
{
out[i] += gi[i];
}
else
{
out[i] += p*gi[i];
pgrad += gi[i]*s[i];
}
}
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*ppgrad, pgrad);
}
void prelu_gradient (
tensor& grad,
const tensor& src,
const tensor& gradient_input,
const tensor& param,
tensor& params_grad
)
{
params_grad = 0;
launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()),
grad.device(), src.device(), gradient_input.device(), grad.size(),
param.device(), params_grad.device());
}
// ----------------------------------------------------------------------------------------
}
}
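// --- Illustrative sketch (not part of dlib) ---------------------------------------
// grid_stride_range(), launch_kernel() and max_jobs() above come from dlib's
// cuda_utils.h and are not reproduced here. Every kernel in this file is a
// grid-stride loop; written in plain CUDA/HIP, the element-wise product of
// _cuda_multiply1 would look like this hypothetical standalone version:
__global__ void example_multiply_plain(float* d, const float* s1, const float* s2, size_t n)
{
    for (size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
         i < n;
         i += (size_t)gridDim.x * blockDim.x)           // stride by the whole grid
    {
        d[i] = s1[i] * s2[i];
    }
}
// A typical launch covers n once and lets the loop absorb any remainder, e.g.:
//   const int threads = 256;
//   const int blocks  = (int)((n + threads - 1) / threads);
//   example_multiply_plain<<<blocks, threads>>>(d, s1, s2, n);
// -----------------------------------------------------------------------------------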
|
3afe6cb8b6b574263d150be6e303e9b43438ccfc.cu
|
// Copyright (C) 2015 Davis E. King ([email protected])
// License: Boost Software License See LICENSE.txt for the full license.
#include "cuda_utils.h"
#include "cuda_dlib.h"
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
void set_device (
int dev
)
{
CHECK_CUDA(cudaSetDevice(dev));
}
int get_device (
)
{
int dev = 0;
CHECK_CUDA(cudaGetDevice(&dev));
return dev;
}
int get_num_devices (
)
{
int num_devices;
CHECK_CUDA(cudaGetDeviceCount(&num_devices));
return num_devices;
}
bool can_access_peer (int device_id, int peer_device_id)
{
int can_access;
CHECK_CUDA(cudaDeviceCanAccessPeer(&can_access, device_id, peer_device_id));
return can_access;
}
bool can_access_peer (const tensor& device, const tensor& peer_device)
{
return can_access_peer(device.device_id(), peer_device.device_id());
}
void device_synchronize (int dev)
{
raii_set_device set_dev(dev);
CHECK_CUDA(cudaDeviceSynchronize());
}
void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); }
enable_peer_access::
enable_peer_access(
int device_id,
int peer_device_id
) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id)
{
raii_set_device set_dev(device_id);
auto err = cudaDeviceEnablePeerAccess(peer_device_id, 0);
if (err == cudaSuccess)
{
call_disable = true;
}
else if (err == cudaErrorPeerAccessAlreadyEnabled)
{
// call cudaGetLastError() to dispose of this error since we don't
// care.
auto err2 = cudaGetLastError();
if (err2 != cudaErrorPeerAccessAlreadyEnabled)
CHECK_CUDA(err2);
}
else
{
CHECK_CUDA(err);
}
}
enable_peer_access::
~enable_peer_access() noexcept(false)
{
if (call_disable)
{
raii_set_device set_dev(device_id);
CHECK_CUDA(cudaDeviceDisablePeerAccess(peer_device_id));
}
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
__global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i]*s2[i];
}
}
__global__ void _cuda_multiply2(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = 0;
for (size_t j = i; j < max_size; j += n)
d[i] += s1[j%s1_n]*s2[j%s2_n];
}
}
__global__ void _cuda_multiply3(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i%s1_n]*s2[i%s2_n];
}
}
void multiply (
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
dest.nc() == src1.nc() && src1.nc() == src2.nc() ,"");
const long MD = std::max(std::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
(src1.num_samples()==1 || src1.num_samples()==MD) &&
(src2.num_samples()==1 || src2.num_samples()==MD) ,"");
if (dest.size() == 0)
return;
const size_t max_size = std::max(std::max(dest.size(),src1.size()),src2.size());
const auto d = dest.host();
const auto s1 = src1.host();
const auto s2 = src2.host();
if (dest.size() == src1.size() && src1.size() == src2.size())
{
launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
}
else if (dest.num_samples() == 1)
{
launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
dest.size(), src1.size(), src2.size(), max_size);
}
else
{
launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
dest.size(), src1.size(), src2.size());
}
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] = s1[i]*s2[k];
}
}
__global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// zero initialize d before we begin.
for (auto i : grid_stride_range(0, ks))
d[i] = 0;
__syncthreads();
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
void multiply_conv (
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (have_same_dimensions(dest,src1))
{
DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k(),"");
if (dest.size() == 0)
return;
launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
else
{
DLIB_CASSERT(have_same_dimensions(src1,src2),"");
DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k(),"");
if (dest.size() == 0)
return;
dim3 blocks(10,1);
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
_cuda_multiply_conv2<<<blocks,threads>>>(
dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i]+s2[i];
}
}
__global__ void _cuda_add2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] = v1+v2;
}
}
void add (
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (dest.size() == 0)
return;
// Do the simple and fast version if everything has the same dimensions
if (have_same_dimensions(dest, src1) &&
have_same_dimensions(dest, src2))
{
launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
}
else
{
// Otherwise, do the more complex version with bounds checking.
launch_kernel(_cuda_add2,max_jobs(dest.size()),
dest.device(), src1.device(), src2.device(),
dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
src2.num_samples(), src2.k(), src2.nr(), src2.nc()
);
}
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s[i] + B;
}
}
__global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s[i];
}
}
void affine_transform(
tensor& dest,
const tensor& src,
const float A,
const float B
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
if (B != 0)
launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
else
launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
void affine_transform(
tensor& dest,
const tensor& src,
const float A
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i] + C;
}
}
__global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i];
}
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const float A,
const float B,
const float C
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
if (C != 0)
launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
else
launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const float A,
const float B
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale)
{
for (auto i : grid_stride_range(0, n))
{
d[i] += scale*s[i];
}
}
void add_scaled(
tensor& dest,
const float scale,
const tensor& src
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform5(
float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D
)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D;
}
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
const float A,
const float B,
const float C,
const float D
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
DLIB_CASSERT(dest.size()==src3.size(),"");
launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(),
src2.device(), src3.device(), dest.size(), A, B, C, D);
}
// -----------------------------------------------------------------------------------
__global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A[i]*s[i] + B[i];
}
}
__global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A[i%bs]*s[i] + B[i%bs];
}
}
void affine_transform(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
DLIB_CASSERT(have_same_dimensions(dest, src),"");
DLIB_CASSERT(
((A.num_samples()==1 && B.num_samples()==1) ||
(A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())) &&
A.nr()==B.nr() && B.nr()==src.nr() &&
A.nc()==B.nc() && B.nc()==src.nc() &&
A.k() ==B.k() && B.k()==src.k(),"");
if (A.num_samples() == 1)
{
launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size());
}
else
{
launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device());
}
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_compute_adam_update(
size_t n,
float* s,
float* m,
float* v,
const float alpha,
const float weight_decay,
const float momentum1,
const float momentum2,
const float* params,
const float* params_grad
)
{
const float eps = 1e-8;
// The loop is equivalent to doing this:
// m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad);
// v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad);
// s = -alpha*m/(sqrt(v) + eps);
for (auto i : grid_stride_range(0, n))
{
float g = (weight_decay*params[i] + params_grad[i]);
m[i] = momentum1*m[i] + (1-momentum1)*g;
v[i] = momentum2*v[i] + (1-momentum2)*g*g;
s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps);
}
}
void compute_adam_update (
tensor& s,
tensor& m,
tensor& v,
const float t,
const float learning_rate,
const float weight_decay,
const float momentum1,
const float momentum2,
const tensor& params,
const tensor& params_grad
)
{
DLIB_CASSERT(s.size() == m.size() &&
s.size() == v.size() &&
s.size() == params.size() &&
s.size() == params_grad.size(),"");
const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t));
launch_kernel(_cuda_compute_adam_update,max_jobs(s.size()),
s.size(), s.device(), m.device(), v.device(), alpha, weight_decay,
momentum1, momentum2, params.device(), params_grad.device());
}
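// Editor's note: the following is a hedged, host-side sketch of the same
// bias-corrected Adam step for a single scalar, added purely for illustration.
// It mirrors the math in _cuda_compute_adam_update and compute_adam_update
// above; the function name is hypothetical and is not part of dlib's API.
inline float example_adam_step_scalar (
    float& m,
    float& v,
    const float t,
    const float learning_rate,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const float param,
    const float param_grad
)
{
    const float eps = 1e-8;
    const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1,t));
    const float g = weight_decay*param + param_grad;
    m = momentum1*m + (1-momentum1)*g;
    v = momentum2*v + (1-momentum2)*g*g;
    return -alpha*m/(std::sqrt(v) + eps);
}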
// -----------------------------------------------------------------------------------
__global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] = A[k]*s[i] + B[k];
}
}
void affine_transform_conv(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
DLIB_CASSERT(have_same_dimensions(dest, src),"");
DLIB_CASSERT(have_same_dimensions(A, B),"");
DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k(),"");
launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()),
dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k());
}
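// Editor's note (added for clarity): in _cuda_affine_transform_conv above,
// bs equals nr*nc and ks equals k, so (i/bs)%ks recovers the channel index of
// element i and A/B supply one scale/offset pair per channel, i.e. a
// per-channel affine transform such as the one produced by batch normalization.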
// -----------------------------------------------------------------------------------
__global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n)
{
for (auto i : grid_stride_range(0, n))
{
out[i] = in[i];
for (size_t j = i+n; j < total_n; j+=n)
out[i] += in[j];
}
}
void assign_bias_gradient (
tensor& grad,
const tensor& gradient_input
)
{
DLIB_CASSERT(
grad.num_samples() == 1 &&
gradient_input.k() == grad.k() &&
gradient_input.nr() == grad.nr() &&
gradient_input.nc() == grad.nc() &&
gradient_input.size() > 0,"");
launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size());
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
__global__ void _cuda_threshold(float* d, size_t n, float thresh)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = d[i]>thresh ? 1:0;
}
}
void threshold (
tensor& data,
float thresh
)
{
launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh);
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result)
{
// Parallel sum everything into local temp variables.
float temp = 0;
for(auto i : grid_stride_range(0, n))
temp += a[i]*b[i];
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*result, temp);
}
void dot (
const tensor& a,
const tensor& b,
tensor& result,
size_t idx
)
{
DLIB_CASSERT(a.size() == b.size(), "");
DLIB_CASSERT(idx < result.size(), "");
launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx);
}
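// Editor's sketch (illustration only, not dlib's actual warp_reduce_atomic_add):
// a typical warp-level reduction followed by a single atomicAdd per warp, the
// pattern relied on by _cuda_dot above.  Assumes CUDA 9.0+ for
// __shfl_down_sync and that all 32 lanes of the warp are active.
__device__ inline void example_warp_reduce_atomic_add(float& dest, float val)
{
    // Halve the active distance each step until lane 0 holds the warp sum.
    for (int offset = warpSize/2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset);
    // One atomic per warp instead of one per thread.
    if ((threadIdx.x & (warpSize-1)) == 0)
        atomicAdd(&dest, val);
}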
// ----------------------------------------------------------------------------------------
__global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp)
{
const float p = *pp;
for (auto i : grid_stride_range(0, n))
{
if (s[i] > 0)
d[i] = s[i];
else
d[i] = p*s[i];
}
}
void prelu (
tensor& dest,
const tensor& src,
const tensor& param
)
{
launch_kernel(_cuda_prelu, max_jobs(dest.size()),
src.device(), dest.device(), src.size(), param.device());
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad)
{
const float p = *pp;
float pgrad = 0;
for(auto i : grid_stride_range(0, n))
{
if (s[i] > 0)
{
out[i] += gi[i];
}
else
{
out[i] += p*gi[i];
pgrad += gi[i]*s[i];
}
}
// Then do the warp reduce add thing to merge into one output value.
warp_reduce_atomic_add(*ppgrad, pgrad);
}
void prelu_gradient (
tensor& grad,
const tensor& src,
const tensor& gradient_input,
const tensor& param,
tensor& params_grad
)
{
params_grad = 0;
launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()),
grad.device(), src.device(), gradient_input.device(), grad.size(),
param.device(), params_grad.device());
}
// ----------------------------------------------------------------------------------------
}
}
|
9c2251c3c8b5b8437ee72fd42211bc43a473595d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// headers in STL
#include <iostream>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/preprocess_points_cuda.h"
namespace apollo {
namespace perception {
namespace lidar {
__global__ void make_pillar_histo_kernel(
const float* dev_points, float* dev_pillar_point_feature_in_coors,
int* pillar_count_histo, const int num_points,
const int max_points_per_pillar, const int grid_x_size,
const int grid_y_size, const int grid_z_size, const float min_x_range,
const float min_y_range, const float min_z_range, const float pillar_x_size,
const float pillar_y_size, const float pillar_z_size,
const int num_point_feature) {
int th_i = threadIdx.x + blockIdx.x * blockDim.x;
if (th_i >= num_points) {
return;
}
int y_coor = floor((dev_points[th_i * num_point_feature + 1] - min_y_range) /
pillar_y_size);
int x_coor = floor((dev_points[th_i * num_point_feature + 0] - min_x_range) /
pillar_x_size);
int z_coor = floor((dev_points[th_i * num_point_feature + 2] - min_z_range) /
pillar_z_size);
if (x_coor >= 0 && x_coor < grid_x_size && y_coor >= 0 &&
y_coor < grid_y_size && z_coor >= 0 && z_coor < grid_z_size) {
int count =
atomicAdd(&pillar_count_histo[y_coor * grid_x_size + x_coor], 1);
if (count < max_points_per_pillar) {
int ind =
y_coor * grid_x_size * max_points_per_pillar * num_point_feature +
x_coor * max_points_per_pillar * num_point_feature +
count * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature_in_coors[ind + i] =
dev_points[th_i * num_point_feature + i];
}
}
}
}
__global__ void make_pillar_index_kernel(
int* dev_pillar_count_histo, int* dev_counter, int* dev_pillar_count,
int* dev_x_coors, int* dev_y_coors, float* dev_num_points_per_pillar,
int* dev_sparse_pillar_map, const int max_pillars,
const int max_points_per_pillar, const int grid_x_size,
const int num_inds_for_scan) {
int x = blockIdx.x;
int y = threadIdx.x;
int num_points_at_this_pillar = dev_pillar_count_histo[y * grid_x_size + x];
if (num_points_at_this_pillar == 0) {
return;
}
int count = atomicAdd(dev_counter, 1);
if (count < max_pillars) {
atomicAdd(dev_pillar_count, 1);
if (num_points_at_this_pillar >= max_points_per_pillar) {
dev_num_points_per_pillar[count] = max_points_per_pillar;
} else {
dev_num_points_per_pillar[count] = num_points_at_this_pillar;
}
dev_x_coors[count] = x;
dev_y_coors[count] = y;
dev_sparse_pillar_map[y * num_inds_for_scan + x] = 1;
}
}
__global__ void make_pillar_feature_kernel(
float* dev_pillar_point_feature_in_coors, float* dev_pillar_point_feature,
float* dev_pillar_coors, int* dev_x_coors, int* dev_y_coors,
float* dev_num_points_per_pillar, const int max_points,
const int num_point_feature, const int grid_x_size) {
int ith_pillar = blockIdx.x;
int num_points_at_this_pillar = dev_num_points_per_pillar[ith_pillar];
int ith_point = threadIdx.x;
if (ith_point >= num_points_at_this_pillar) {
return;
}
int x_ind = dev_x_coors[ith_pillar];
int y_ind = dev_y_coors[ith_pillar];
int pillar_ind = ith_pillar * max_points * num_point_feature +
ith_point * num_point_feature;
int coors_ind = y_ind * grid_x_size * max_points * num_point_feature +
x_ind * max_points * num_point_feature +
ith_point * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature[pillar_ind + i] =
dev_pillar_point_feature_in_coors[coors_ind + i];
}
float coor_x = static_cast<float>(x_ind);
float coor_y = static_cast<float>(y_ind);
// TODO(chenjiahao): replace '4' with hyper-parameter vars
// TODO(chenjiahao): batch idx and z need to be specified after
// voxels are utilized
dev_pillar_coors[ith_pillar * 4 + 0] = 0; // batch idx
dev_pillar_coors[ith_pillar * 4 + 1] = 0; // z
dev_pillar_coors[ith_pillar * 4 + 2] = coor_y;
dev_pillar_coors[ith_pillar * 4 + 3] = coor_x;
}
PreprocessPointsCuda::PreprocessPointsCuda(
const int num_threads, const int max_num_pillars,
const int max_points_per_pillar, const int num_point_feature,
const int num_inds_for_scan, const int grid_x_size, const int grid_y_size,
const int grid_z_size, const float pillar_x_size, const float pillar_y_size,
const float pillar_z_size, const float min_x_range, const float min_y_range,
const float min_z_range)
: num_threads_(num_threads),
max_num_pillars_(max_num_pillars),
max_num_points_per_pillar_(max_points_per_pillar),
num_point_feature_(num_point_feature),
num_inds_for_scan_(num_inds_for_scan),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size),
grid_z_size_(grid_z_size),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
pillar_z_size_(pillar_z_size),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
min_z_range_(min_z_range) {
GPU_CHECK(
hipMalloc(reinterpret_cast<void**>(&dev_pillar_point_feature_in_coors_),
grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ *
num_point_feature_ * sizeof(float)));
GPU_CHECK(hipMalloc(reinterpret_cast<void**>(&dev_pillar_count_histo_),
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(hipMalloc(reinterpret_cast<void**>(&dev_counter_), sizeof(int)));
GPU_CHECK(
hipMalloc(reinterpret_cast<void**>(&dev_pillar_count_), sizeof(int)));
}
PreprocessPointsCuda::~PreprocessPointsCuda() {
GPU_CHECK(hipFree(dev_pillar_point_feature_in_coors_));
GPU_CHECK(hipFree(dev_pillar_count_histo_));
GPU_CHECK(hipFree(dev_counter_));
GPU_CHECK(hipFree(dev_pillar_count_));
}
void PreprocessPointsCuda::DoPreprocessPointsCuda(
const float* dev_points, const int in_num_points, int* dev_x_coors,
int* dev_y_coors, float* dev_num_points_per_pillar,
float* dev_pillar_point_feature, float* dev_pillar_coors,
int* dev_sparse_pillar_map, int* host_pillar_count) {
GPU_CHECK(hipMemset(dev_pillar_count_histo_, 0,
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(hipMemset(dev_counter_, 0, sizeof(int)));
GPU_CHECK(hipMemset(dev_pillar_count_, 0, sizeof(int)));
int num_block = DIVUP(in_num_points, num_threads_);
hipLaunchKernelGGL(( make_pillar_histo_kernel), dim3(num_block), dim3(num_threads_), 0, 0,
dev_points, dev_pillar_point_feature_in_coors_, dev_pillar_count_histo_,
in_num_points, max_num_points_per_pillar_, grid_x_size_, grid_y_size_,
grid_z_size_, min_x_range_, min_y_range_, min_z_range_, pillar_x_size_,
pillar_y_size_, pillar_z_size_, num_point_feature_);
hipLaunchKernelGGL(( make_pillar_index_kernel), dim3(grid_x_size_), dim3(grid_y_size_), 0, 0,
dev_pillar_count_histo_, dev_counter_, dev_pillar_count_, dev_x_coors,
dev_y_coors, dev_num_points_per_pillar, dev_sparse_pillar_map,
max_num_pillars_, max_num_points_per_pillar_, grid_x_size_,
num_inds_for_scan_);
GPU_CHECK(hipMemcpy(host_pillar_count, dev_pillar_count_, sizeof(int),
hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( make_pillar_feature_kernel), dim3(host_pillar_count[0]),
dim3(max_num_points_per_pillar_), 0, 0,
dev_pillar_point_feature_in_coors_, dev_pillar_point_feature,
dev_pillar_coors, dev_x_coors, dev_y_coors, dev_num_points_per_pillar,
max_num_points_per_pillar_, num_point_feature_, grid_x_size_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
|
9c2251c3c8b5b8437ee72fd42211bc43a473595d.cu
|
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// headers in STL
#include <iostream>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/preprocess_points_cuda.h"
namespace apollo {
namespace perception {
namespace lidar {
__global__ void make_pillar_histo_kernel(
const float* dev_points, float* dev_pillar_point_feature_in_coors,
int* pillar_count_histo, const int num_points,
const int max_points_per_pillar, const int grid_x_size,
const int grid_y_size, const int grid_z_size, const float min_x_range,
const float min_y_range, const float min_z_range, const float pillar_x_size,
const float pillar_y_size, const float pillar_z_size,
const int num_point_feature) {
int th_i = threadIdx.x + blockIdx.x * blockDim.x;
if (th_i >= num_points) {
return;
}
int y_coor = floor((dev_points[th_i * num_point_feature + 1] - min_y_range) /
pillar_y_size);
int x_coor = floor((dev_points[th_i * num_point_feature + 0] - min_x_range) /
pillar_x_size);
int z_coor = floor((dev_points[th_i * num_point_feature + 2] - min_z_range) /
pillar_z_size);
if (x_coor >= 0 && x_coor < grid_x_size && y_coor >= 0 &&
y_coor < grid_y_size && z_coor >= 0 && z_coor < grid_z_size) {
int count =
atomicAdd(&pillar_count_histo[y_coor * grid_x_size + x_coor], 1);
if (count < max_points_per_pillar) {
int ind =
y_coor * grid_x_size * max_points_per_pillar * num_point_feature +
x_coor * max_points_per_pillar * num_point_feature +
count * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature_in_coors[ind + i] =
dev_points[th_i * num_point_feature + i];
}
}
}
}
__global__ void make_pillar_index_kernel(
int* dev_pillar_count_histo, int* dev_counter, int* dev_pillar_count,
int* dev_x_coors, int* dev_y_coors, float* dev_num_points_per_pillar,
int* dev_sparse_pillar_map, const int max_pillars,
const int max_points_per_pillar, const int grid_x_size,
const int num_inds_for_scan) {
int x = blockIdx.x;
int y = threadIdx.x;
int num_points_at_this_pillar = dev_pillar_count_histo[y * grid_x_size + x];
if (num_points_at_this_pillar == 0) {
return;
}
int count = atomicAdd(dev_counter, 1);
if (count < max_pillars) {
atomicAdd(dev_pillar_count, 1);
if (num_points_at_this_pillar >= max_points_per_pillar) {
dev_num_points_per_pillar[count] = max_points_per_pillar;
} else {
dev_num_points_per_pillar[count] = num_points_at_this_pillar;
}
dev_x_coors[count] = x;
dev_y_coors[count] = y;
dev_sparse_pillar_map[y * num_inds_for_scan + x] = 1;
}
}
__global__ void make_pillar_feature_kernel(
float* dev_pillar_point_feature_in_coors, float* dev_pillar_point_feature,
float* dev_pillar_coors, int* dev_x_coors, int* dev_y_coors,
float* dev_num_points_per_pillar, const int max_points,
const int num_point_feature, const int grid_x_size) {
int ith_pillar = blockIdx.x;
int num_points_at_this_pillar = dev_num_points_per_pillar[ith_pillar];
int ith_point = threadIdx.x;
if (ith_point >= num_points_at_this_pillar) {
return;
}
int x_ind = dev_x_coors[ith_pillar];
int y_ind = dev_y_coors[ith_pillar];
int pillar_ind = ith_pillar * max_points * num_point_feature +
ith_point * num_point_feature;
int coors_ind = y_ind * grid_x_size * max_points * num_point_feature +
x_ind * max_points * num_point_feature +
ith_point * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature[pillar_ind + i] =
dev_pillar_point_feature_in_coors[coors_ind + i];
}
float coor_x = static_cast<float>(x_ind);
float coor_y = static_cast<float>(y_ind);
// TODO(chenjiahao): replace '4' with hyper-parameter vars
// TODO(chenjiahao): batch idx and z need to be specified after
// voxels are utilized
dev_pillar_coors[ith_pillar * 4 + 0] = 0; // batch idx
dev_pillar_coors[ith_pillar * 4 + 1] = 0; // z
dev_pillar_coors[ith_pillar * 4 + 2] = coor_y;
dev_pillar_coors[ith_pillar * 4 + 3] = coor_x;
}
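// Editor's note (summary, added for clarity): DoPreprocessPointsCuda below
// chains the three kernels above: make_pillar_histo_kernel scatters raw points
// into per-pillar slots and counts them, make_pillar_index_kernel compacts the
// non-empty pillars into dense coordinate/count arrays (capped at max_pillars),
// and make_pillar_feature_kernel gathers the per-pillar point features and
// writes the (batch, z, y, x) pillar coordinates.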
PreprocessPointsCuda::PreprocessPointsCuda(
const int num_threads, const int max_num_pillars,
const int max_points_per_pillar, const int num_point_feature,
const int num_inds_for_scan, const int grid_x_size, const int grid_y_size,
const int grid_z_size, const float pillar_x_size, const float pillar_y_size,
const float pillar_z_size, const float min_x_range, const float min_y_range,
const float min_z_range)
: num_threads_(num_threads),
max_num_pillars_(max_num_pillars),
max_num_points_per_pillar_(max_points_per_pillar),
num_point_feature_(num_point_feature),
num_inds_for_scan_(num_inds_for_scan),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size),
grid_z_size_(grid_z_size),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
pillar_z_size_(pillar_z_size),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
min_z_range_(min_z_range) {
GPU_CHECK(
cudaMalloc(reinterpret_cast<void**>(&dev_pillar_point_feature_in_coors_),
grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ *
num_point_feature_ * sizeof(float)));
GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(&dev_pillar_count_histo_),
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(&dev_counter_), sizeof(int)));
GPU_CHECK(
cudaMalloc(reinterpret_cast<void**>(&dev_pillar_count_), sizeof(int)));
}
PreprocessPointsCuda::~PreprocessPointsCuda() {
GPU_CHECK(cudaFree(dev_pillar_point_feature_in_coors_));
GPU_CHECK(cudaFree(dev_pillar_count_histo_));
GPU_CHECK(cudaFree(dev_counter_));
GPU_CHECK(cudaFree(dev_pillar_count_));
}
void PreprocessPointsCuda::DoPreprocessPointsCuda(
const float* dev_points, const int in_num_points, int* dev_x_coors,
int* dev_y_coors, float* dev_num_points_per_pillar,
float* dev_pillar_point_feature, float* dev_pillar_coors,
int* dev_sparse_pillar_map, int* host_pillar_count) {
GPU_CHECK(cudaMemset(dev_pillar_count_histo_, 0,
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(cudaMemset(dev_counter_, 0, sizeof(int)));
GPU_CHECK(cudaMemset(dev_pillar_count_, 0, sizeof(int)));
int num_block = DIVUP(in_num_points, num_threads_);
make_pillar_histo_kernel<<<num_block, num_threads_>>>(
dev_points, dev_pillar_point_feature_in_coors_, dev_pillar_count_histo_,
in_num_points, max_num_points_per_pillar_, grid_x_size_, grid_y_size_,
grid_z_size_, min_x_range_, min_y_range_, min_z_range_, pillar_x_size_,
pillar_y_size_, pillar_z_size_, num_point_feature_);
make_pillar_index_kernel<<<grid_x_size_, grid_y_size_>>>(
dev_pillar_count_histo_, dev_counter_, dev_pillar_count_, dev_x_coors,
dev_y_coors, dev_num_points_per_pillar, dev_sparse_pillar_map,
max_num_pillars_, max_num_points_per_pillar_, grid_x_size_,
num_inds_for_scan_);
GPU_CHECK(cudaMemcpy(host_pillar_count, dev_pillar_count_, sizeof(int),
cudaMemcpyDeviceToHost));
make_pillar_feature_kernel<<<host_pillar_count[0],
max_num_points_per_pillar_>>>(
dev_pillar_point_feature_in_coors_, dev_pillar_point_feature,
dev_pillar_coors, dev_x_coors, dev_y_coors, dev_num_points_per_pillar,
max_num_points_per_pillar_, num_point_feature_, grid_x_size_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
|
6a75427871edc437c2e707a848a354135e4175b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// old op include, fluid should be removed
#ifdef PADDLE_WITH_HIP
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#else
#include <hipcub/hipcub.hpp>
#endif
#include <vector>
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/impl/softmax_kernel_impl.h"
#include "paddle/phi/kernels/margin_cross_entropy_grad_kernel.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T, typename Context>
void GetClassInterval(const gpuStream_t& stream,
const phi::Place& place,
const Context& dev_ctx,
const int rid,
const int rank,
const int nranks,
const int D,
DenseTensor* class_interval) {
std::vector<int> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = D;
if (nranks <= 1) {
phi::TensorFromVector(shard_dim_vec, dev_ctx, class_interval);
return;
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
DenseTensor num_classes_per_device;
phi::TensorFromVector(shard_dim_vec, dev_ctx, &num_classes_per_device);
int* num_classes_per_device_ptr = num_classes_per_device.data<int>();
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
// Use ProcessGroup
paddle::distributed::ProcessGroup* pg = map->get(rid);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm =
paddle::platform::NCCLCommContext::Instance().Get(rid, place);
// use global calculate stream
const auto calcu_stream =
static_cast<GPUContext*>(phi::DeviceContextPool::Instance().Get(place))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::ncclAllReduce(
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
phi::ToNCCLDataType(num_classes_per_device.dtype()),
ncclSum,
comm->comm(),
calcu_stream));
}
class_interval->Resize({nranks + 1});
auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval);
size_t cub_temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum<int*, int*>(
nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream);
auto cub_temp_storage =
phi::memory_utils::Alloc(place, cub_temp_storage_bytes);
hipcub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(),
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
stream);
return;
#endif
}
template <typename T, typename IndexT>
__global__ void CalculateGrad(T* logits_grad,
const T* loss_grad,
const T* logits,
const IndexT* label,
const float margin1,
const float margin2,
const float scale,
const int rank,
const int64_t N,
const int64_t D,
const int* class_interval_ptr) {
using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
int start_index = class_interval_ptr[rank];
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
if ((col + start_index) == label[row]) {
logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row];
if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) {
MPType dout = static_cast<MPType>(logits_grad[i]);
MPType one = static_cast<MPType>(1.0f);
MPType x = static_cast<MPType>(logits[i]);
MPType m1 = static_cast<MPType>(margin1);
MPType m2 = static_cast<MPType>(margin2);
MPType d = m1 * sin(m1 * acos(x) + m2) / sqrt(one - x * x);
logits_grad[i] = static_cast<T>(dout * d);
}
} else {
logits_grad[i] *= loss_grad[row];
}
if (fabs(scale - 1.0) > 1e-8) {
logits_grad[i] *= static_cast<T>(scale);
}
}
}
template <typename T, typename Context>
void MarginCrossEntropyGradKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& label,
const DenseTensor& softmax,
const DenseTensor& loss_grad,
bool return_softmax,
int ring_id,
int rank,
int nranks,
float margin1,
float margin2,
float margin3,
float scale,
DenseTensor* logits_grad) {
const auto softmax_dims = softmax.dims();
const int axis = softmax_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, softmax_dims);
const int D = phi::funcs::SizeFromAxis(axis, softmax_dims);
if (return_softmax) {
phi::Copy<Context>(
dev_ctx, softmax, dev_ctx.GetPlace(), false, logits_grad);
} else {
logits_grad->ShareDataWith(softmax);
}
int blocks = NumBlocks(N * D);
int threads = kNumCUDAThreads;
const auto& label_type = label.dtype();
DenseTensor class_interval;
GetClassInterval<T, Context>(dev_ctx.stream(),
dev_ctx.GetPlace(),
dev_ctx,
ring_id,
rank,
nranks,
D,
&class_interval);
if (label_type == phi::DataType::INT32) {
typedef int32_t LabelT;
hipLaunchKernelGGL(( CalculateGrad<T, LabelT>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logits_grad->data<T>(),
loss_grad.data<T>(),
logits.data<T>(),
label.data<LabelT>(),
margin1,
margin2,
scale,
rank,
N,
D,
class_interval.data<int>());
} else if (label_type == phi::DataType::INT64) {
typedef int64_t LabelT;
hipLaunchKernelGGL(( CalculateGrad<T, LabelT>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logits_grad->data<T>(),
loss_grad.data<T>(),
logits.data<T>(),
label.data<LabelT>(),
margin1,
margin2,
scale,
rank,
N,
D,
class_interval.data<int>());
}
}
} // namespace phi
PD_REGISTER_KERNEL(margin_cross_entropy_grad,
GPU,
ALL_LAYOUT,
phi::MarginCrossEntropyGradKernel,
float,
double,
phi::dtype::float16) {}
|
6a75427871edc437c2e707a848a354135e4175b1.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// old op include, fluid should be removed
#ifdef PADDLE_WITH_HIP
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#else
#include <cub/cub.cuh>
#endif
#include <vector>
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/impl/softmax_kernel_impl.h"
#include "paddle/phi/kernels/margin_cross_entropy_grad_kernel.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T, typename Context>
void GetClassInterval(const gpuStream_t& stream,
const phi::Place& place,
const Context& dev_ctx,
const int rid,
const int rank,
const int nranks,
const int D,
DenseTensor* class_interval) {
std::vector<int> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = D;
if (nranks <= 1) {
phi::TensorFromVector(shard_dim_vec, dev_ctx, class_interval);
return;
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
DenseTensor num_classes_per_device;
phi::TensorFromVector(shard_dim_vec, dev_ctx, &num_classes_per_device);
int* num_classes_per_device_ptr = num_classes_per_device.data<int>();
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
// Use ProcessGroup
paddle::distributed::ProcessGroup* pg = map->get(rid);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm =
paddle::platform::NCCLCommContext::Instance().Get(rid, place);
// use global calculate stream
const auto calcu_stream =
static_cast<GPUContext*>(phi::DeviceContextPool::Instance().Get(place))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::ncclAllReduce(
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
phi::ToNCCLDataType(num_classes_per_device.dtype()),
ncclSum,
comm->comm(),
calcu_stream));
}
class_interval->Resize({nranks + 1});
auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval);
size_t cub_temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum<int*, int*>(
nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream);
auto cub_temp_storage =
phi::memory_utils::Alloc(place, cub_temp_storage_bytes);
cub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(),
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
stream);
return;
#endif
}
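// Editor's sketch (illustration only; ExampleInclusiveSum is not a Paddle
// API): the two-phase CUB pattern used in GetClassInterval above, shown in
// isolation.  The first InclusiveSum call only computes the required
// temporary-storage size; the second performs the inclusive prefix sum.
inline void ExampleInclusiveSum(const phi::Place& place,
                                const gpuStream_t& stream,
                                int* d_in,
                                int* d_out,
                                int num_items) {
  size_t temp_storage_bytes = 0;
  // Query pass: a null temp-storage pointer only sets temp_storage_bytes.
  cub::DeviceScan::InclusiveSum<int*, int*>(
      nullptr, temp_storage_bytes, d_in, d_out, num_items, stream);
  auto temp_storage = phi::memory_utils::Alloc(place, temp_storage_bytes);
  // Compute pass: run the actual inclusive prefix sum on the stream.
  cub::DeviceScan::InclusiveSum<int*, int*>(temp_storage->ptr(),
                                            temp_storage_bytes,
                                            d_in,
                                            d_out,
                                            num_items,
                                            stream);
}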
template <typename T, typename IndexT>
__global__ void CalculateGrad(T* logits_grad,
const T* loss_grad,
const T* logits,
const IndexT* label,
const float margin1,
const float margin2,
const float scale,
const int rank,
const int64_t N,
const int64_t D,
const int* class_interval_ptr) {
using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
int start_index = class_interval_ptr[rank];
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
if ((col + start_index) == label[row]) {
logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row];
if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) {
MPType dout = static_cast<MPType>(logits_grad[i]);
MPType one = static_cast<MPType>(1.0f);
MPType x = static_cast<MPType>(logits[i]);
MPType m1 = static_cast<MPType>(margin1);
MPType m2 = static_cast<MPType>(margin2);
MPType d = m1 * sin(m1 * acos(x) + m2) / sqrt(one - x * x);
logits_grad[i] = static_cast<T>(dout * d);
}
} else {
logits_grad[i] *= loss_grad[row];
}
if (fabs(scale - 1.0) > 1e-8) {
logits_grad[i] *= static_cast<T>(scale);
}
}
}
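// Editor's note (added derivation, for clarity): the margin branch above
// assumes the forward pass applied cos(m1*acos(x) + m2) to the target logit x,
// so the chain-rule factor is
//   d/dx cos(m1*acos(x) + m2) = m1*sin(m1*acos(x) + m2) / sqrt(1 - x*x),
// which is exactly the expression assigned to d.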
template <typename T, typename Context>
void MarginCrossEntropyGradKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& label,
const DenseTensor& softmax,
const DenseTensor& loss_grad,
bool return_softmax,
int ring_id,
int rank,
int nranks,
float margin1,
float margin2,
float margin3,
float scale,
DenseTensor* logits_grad) {
const auto softmax_dims = softmax.dims();
const int axis = softmax_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, softmax_dims);
const int D = phi::funcs::SizeFromAxis(axis, softmax_dims);
if (return_softmax) {
phi::Copy<Context>(
dev_ctx, softmax, dev_ctx.GetPlace(), false, logits_grad);
} else {
logits_grad->ShareDataWith(softmax);
}
int blocks = NumBlocks(N * D);
int threads = kNumCUDAThreads;
const auto& label_type = label.dtype();
DenseTensor class_interval;
GetClassInterval<T, Context>(dev_ctx.stream(),
dev_ctx.GetPlace(),
dev_ctx,
ring_id,
rank,
nranks,
D,
&class_interval);
if (label_type == phi::DataType::INT32) {
typedef int32_t LabelT;
CalculateGrad<T, LabelT>
<<<blocks, threads, 0, dev_ctx.stream()>>>(logits_grad->data<T>(),
loss_grad.data<T>(),
logits.data<T>(),
label.data<LabelT>(),
margin1,
margin2,
scale,
rank,
N,
D,
class_interval.data<int>());
} else if (label_type == phi::DataType::INT64) {
typedef int64_t LabelT;
CalculateGrad<T, LabelT>
<<<blocks, threads, 0, dev_ctx.stream()>>>(logits_grad->data<T>(),
loss_grad.data<T>(),
logits.data<T>(),
label.data<LabelT>(),
margin1,
margin2,
scale,
rank,
N,
D,
class_interval.data<int>());
}
}
} // namespace phi
PD_REGISTER_KERNEL(margin_cross_entropy_grad,
GPU,
ALL_LAYOUT,
phi::MarginCrossEntropyGradKernel,
float,
double,
phi::dtype::float16) {}
|
e5133d8317e024c8e007b373c4078af8c099933b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <stdio.h>
#include <algorithm>
#include <iterator>
#include <utility>
#include <math.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include "mttkrp_cpu.h"
#include <bits/stdc++.h>
// #include <papi.h>
#include "mttkrp_gpu.h"
using namespace std;
void handle_error(int err){
printf("PAPI error %d \n", err);
}
int main(int argc, char* argv[]){
// int numEvents = 7;
// long long *values = new long long [numEvents];
// int events[7] = {PAPI_L1_DCM, PAPI_L2_DCM,
// PAPI_RES_STL, PAPI_L3_TCM,
// PAPI_LD_INS, PAPI_SR_INS, PAPI_BR_INS
// };
// int events[1] = {PAPI_L1_DCM, PAPI_L2_DCM, PAPI_L3_DCA,
// PAPI_L1_TCM, PAPI_L2_TCM, PAPI_L3_TCM,
// PAPI_RES_STL, PAPI_LST_INS,
// PAPI_PRF_DM, PAPI_LD_INS, PAPI_SP_OPS, PAPI_VEC_SP};
// int retval = PAPI_library_init(PAPI_VER_CURRENT);
// if (retval != PAPI_VER_CURRENT) {
// printf("Error! PAPI_library_init %d\n",retval);
// }
Options Opt = parse_cmd_options(argc, argv);
// Opt.print();
Tensor X;
load_tensor(X, Opt.inFileName);
//TBD:: sort X
// tensor_stats(X);
// print_COOtensor(X);
//TBD:: fix hard coded 3
Matrix U[3];
for (int i = 0; i < X.ndims; ++i){
create_mats(X, U, i, Opt.R);
randomize_mats(X, U, i, Opt.R);
}
zero_mat(X, U, Opt.mode);
if(Opt.verbose)
cout << endl << "Starting MTTKRP..." << endl;
// COO CPU
if(Opt.impType == 1){
double t0 = seconds();
MTTKRP_COO_CPU(X, U, Opt.mode, Opt.R);
printf("COO CPU - time: %.3f sec \n", seconds() - t0);
}
// HCSR CPU
else if(Opt.impType == 2){
create_HCSR(X);
// print_HCSRtensor(X);
double t0 = seconds();
// if (PAPI_start_counters(events, numEvents) != PAPI_OK)
// handle_error(1);
MTTKRP_HCSR_CPU(X, U, Opt.mode, Opt.R);
// if ( PAPI_stop_counters(values, numEvents) != PAPI_OK)
// handle_error(1);
// for (int nm = 0; nm < numEvents; ++nm)
// printf(" %d " , values[nm]);
// printf("\n");
printf("HCSR CPU - time: %.3f sec \n", seconds() - t0);
}
// COO GPU
else if(Opt.impType == 3){
cout << " GPU COO has bugs! " << endl;
MTTKRP_COO_GPU(X, U, Opt);
}
// HCSR GPU
else if(Opt.impType == 4){
create_HCSR(X);
MTTKRP_HCSR_GPU(X, U, Opt);
}
// Tiled versions
else if(Opt.impType >= 5){
TiledTensor TiledX[Opt.nTile];
create_HCSR(X);
Opt.tileSize = (X.dims[2] + Opt.nTile - 1)/Opt.nTile;
if(Opt.nTile > X.dims[2]){
cout << "Number of tiles ("<< Opt.nTile << ") should be as minimum as K's dimension (" << X.dims[2] << "). Exiting."<< endl ;
exit(0);
}
// split X into tiles based on K indices
make_KTiling(X, TiledX, Opt);
// create HCSR for each tile
for (int tile = 0; tile < Opt.nTile; ++tile){
create_TiledHCSR(TiledX, tile);
// print_TiledHCSRtensor(TiledX, tile);
}
// Split tiles into bins according to nnz in slice
for (int tile = 0; tile < Opt.nTile; ++tile){
make_TiledBin(TiledX, Opt, tile);
}
// MTTKRP
// COO GPU
if(Opt.impType == 5){
double t0 = seconds();
MTTKRP_TILED_COO_CPU(TiledX, U, Opt);
printf("TILED COO CPU - time: %.3f sec \n", seconds() - t0);
}
// HCSR GPU
else if(Opt.impType == 6){
double t0 = seconds();
MTTKRP_TILED_HCSR_CPU(TiledX, U, Opt);
printf("TILED HCSR CPU - time: %.3f sec \n", seconds() - t0);
}
//COO GPU
else if(Opt.impType == 7){
cout << "GPU COO has bugs! " << endl;
MTTKRP_TILED_COO_GPU(TiledX, U, Opt);
}
// HCSR GPU
else if(Opt.impType == 8){
MTTKRP_TILED_HCSR_GPU(TiledX, U, Opt);
}
}
else // e.g. -1
cout << "no MTTKRP" << endl;
if(!Opt.outFileName.empty())
write_output(U, Opt.mode, Opt.outFileName);
}
|
e5133d8317e024c8e007b373c4078af8c099933b.cu
|
#include <fstream>
#include <stdio.h>
#include <algorithm>
#include <iterator>
#include <utility>
#include <math.h>
#include <omp.h>
#include <cuda.h>
#include "mttkrp_cpu.h"
#include <bits/stdc++.h>
// #include <papi.h>
#include "mttkrp_gpu.h"
using namespace std;
void handle_error(int err){
printf("PAPI error %d \n", err);
}
int main(int argc, char* argv[]){
// int numEvents = 7;
// long long *values = new long long [numEvents];
// int events[7] = {PAPI_L1_DCM, PAPI_L2_DCM,
// PAPI_RES_STL, PAPI_L3_TCM,
// PAPI_LD_INS, PAPI_SR_INS, PAPI_BR_INS
// };
// int events[1] = {PAPI_L1_DCM, PAPI_L2_DCM, PAPI_L3_DCA,
// PAPI_L1_TCM, PAPI_L2_TCM, PAPI_L3_TCM,
// PAPI_RES_STL, PAPI_LST_INS,
// PAPI_PRF_DM, PAPI_LD_INS, PAPI_SP_OPS, PAPI_VEC_SP};
// int retval = PAPI_library_init(PAPI_VER_CURRENT);
// if (retval != PAPI_VER_CURRENT) {
// printf("Error! PAPI_library_init %d\n",retval);
// }
Options Opt = parse_cmd_options(argc, argv);
// Opt.print();
Tensor X;
load_tensor(X, Opt.inFileName);
//TBD:: sort X
// tensor_stats(X);
// print_COOtensor(X);
//TBD:: fix hard coded 3
Matrix U[3];
for (int i = 0; i < X.ndims; ++i){
create_mats(X, U, i, Opt.R);
randomize_mats(X, U, i, Opt.R);
}
zero_mat(X, U, Opt.mode);
if(Opt.verbose)
cout << endl << "Starting MTTKRP..." << endl;
// COO CPU
if(Opt.impType == 1){
double t0 = seconds();
MTTKRP_COO_CPU(X, U, Opt.mode, Opt.R);
printf("COO CPU - time: %.3f sec \n", seconds() - t0);
}
// HCSR CPU
else if(Opt.impType == 2){
create_HCSR(X);
// print_HCSRtensor(X);
double t0 = seconds();
// if (PAPI_start_counters(events, numEvents) != PAPI_OK)
// handle_error(1);
MTTKRP_HCSR_CPU(X, U, Opt.mode, Opt.R);
// if ( PAPI_stop_counters(values, numEvents) != PAPI_OK)
// handle_error(1);
// for (int nm = 0; nm < numEvents; ++nm)
// printf(" %d " , values[nm]);
// printf("\n");
printf("HCSR CPU - time: %.3f sec \n", seconds() - t0);
}
// COO GPU
else if(Opt.impType == 3){
cout << " GPU COO has bugs! " << endl;
MTTKRP_COO_GPU(X, U, Opt);
}
// HCSR GPU
else if(Opt.impType == 4){
create_HCSR(X);
MTTKRP_HCSR_GPU(X, U, Opt);
}
// Tiled versions
else if(Opt.impType >= 5){
TiledTensor TiledX[Opt.nTile];
create_HCSR(X);
Opt.tileSize = (X.dims[2] + Opt.nTile - 1)/Opt.nTile;
if(Opt.nTile > X.dims[2]){
cout << "Number of tiles ("<< Opt.nTile << ") should be as minimum as K's dimension (" << X.dims[2] << "). Exiting."<< endl ;
exit(0);
}
// split X into tiles based on K indices
make_KTiling(X, TiledX, Opt);
// create HCSR for each tile
for (int tile = 0; tile < Opt.nTile; ++tile){
create_TiledHCSR(TiledX, tile);
// print_TiledHCSRtensor(TiledX, tile);
}
// Split tiles into bins according to nnz in slice
for (int tile = 0; tile < Opt.nTile; ++tile){
make_TiledBin(TiledX, Opt, tile);
}
// MTTKRP
// COO GPU
if(Opt.impType == 5){
double t0 = seconds();
MTTKRP_TILED_COO_CPU(TiledX, U, Opt);
printf("TILED COO CPU - time: %.3f sec \n", seconds() - t0);
}
// HCSR GPU
else if(Opt.impType == 6){
double t0 = seconds();
MTTKRP_TILED_HCSR_CPU(TiledX, U, Opt);
printf("TILED HCSR CPU - time: %.3f sec \n", seconds() - t0);
}
//COO GPU
else if(Opt.impType == 7){
cout << "GPU COO has bugs! " << endl;
MTTKRP_TILED_COO_GPU(TiledX, U, Opt);
}
// HCSR GPU
else if(Opt.impType == 8){
MTTKRP_TILED_HCSR_GPU(TiledX, U, Opt);
}
}
else // e.g. -1
cout << "no MTTKRP" << endl;
if(!Opt.outFileName.empty())
write_output(U, Opt.mode, Opt.outFileName);
}
|
32d052dc9ce54668ecfc6f9bafb133b43841415e.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <hipcub/hipcub.hpp>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/CoreUtil.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/CUDALauncher.cuh"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryImpl.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryJacobianImpl.h"
#include "open3d/t/pipelines/kernel/TransformationConverter.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
namespace odometry {
void PreprocessDepthCUDA(const core::Tensor& depth,
core::Tensor& depth_processed,
float depth_scale,
float depth_max) {
NDArrayIndexer depth_in_indexer(depth, 2);
// Output
depth_processed = core::Tensor::Empty(
depth.GetShape(), core::Dtype::Float32, depth.GetDevice());
NDArrayIndexer depth_out_indexer(depth_processed, 2);
int64_t rows = depth_in_indexer.GetShape(0);
int64_t cols = depth_in_indexer.GetShape(1);
int64_t n = rows * cols;
DISPATCH_DTYPE_TO_TEMPLATE(depth.GetDtype(), [&]() {
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
int64_t y = workload_idx / cols;
int64_t x = workload_idx % cols;
float d = *depth_in_indexer.GetDataPtrFromCoord<scalar_t>(
x, y) /
depth_scale;
float* d_out_ptr =
depth_out_indexer.GetDataPtrFromCoord<float>(x, y);
bool valid = (d > 0 && d < depth_max);
*d_out_ptr = valid ? d : NAN;
});
});
}
void PyrDownDepthCUDA(const core::Tensor& depth,
core::Tensor& depth_down,
float depth_diff) {
t::geometry::kernel::NDArrayIndexer depth_indexer(depth, 2);
int rows = depth_indexer.GetShape(0);
int cols = depth_indexer.GetShape(1);
int rows_down = rows / 2;
int cols_down = cols / 2;
depth_down = core::Tensor::Empty({rows_down, cols_down},
core::Dtype::Float32, depth.GetDevice());
t::geometry::kernel::NDArrayIndexer depth_down_indexer(depth_down, 2);
int n = rows_down * cols_down;
// Gaussian filter window size
const int D = 5;
// Gaussian filter weights
const float weights[3] = {0.375f, 0.25f, 0.0625f};
// Reference:
// https://github.com/mp3guy/ICPCUDA/blob/master/Cuda/pyrdown.cu#L41
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
int y = workload_idx / cols_down;
int x = workload_idx % cols_down;
float center =
*depth_indexer.GetDataPtrFromCoord<float>(2 * x, 2 * y);
if (isnan(center)) {
*depth_down_indexer.GetDataPtrFromCoord<float>(x, y) = NAN;
return;
}
int x_min = max(0, 2 * x - D / 2) - 2 * x;
int y_min = max(0, 2 * y - D / 2) - 2 * y;
int x_max = min(cols, 2 * x - D / 2 + D) - 2 * x;
int y_max = min(rows, 2 * y - D / 2 + D) - 2 * y;
float sum = 0;
float sum_weight = 0;
for (int yi = y_min; yi < y_max; ++yi) {
for (int xi = x_min; xi < x_max; ++xi) {
float val = *depth_indexer.GetDataPtrFromCoord<float>(
2 * x + xi, 2 * y + yi);
if (!isnan(val) && abs(val - center) < depth_diff) {
sum += val * weights[abs(xi)] * weights[abs(yi)];
sum_weight += weights[abs(xi)] * weights[abs(yi)];
}
}
}
*depth_down_indexer.GetDataPtrFromCoord<float>(x, y) =
sum / sum_weight;
});
}
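// Editor's note (added for clarity): the weights used in PyrDownDepthCUDA
// above are the 5-tap binomial kernel [1 4 6 4 1]/16 indexed by |offset|
// (6/16 = 0.375, 4/16 = 0.25, 1/16 = 0.0625); taps whose depth is NaN or
// differs from the centre depth by more than depth_diff are skipped, and the
// result is renormalised by sum_weight.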
void CreateVertexMapCUDA(const core::Tensor& depth_map,
const core::Tensor& intrinsics,
core::Tensor& vertex_map) {
NDArrayIndexer depth_indexer(depth_map, 2);
t::geometry::kernel::TransformIndexer ti(intrinsics);
// Output
int64_t rows = depth_indexer.GetShape(0);
int64_t cols = depth_indexer.GetShape(1);
vertex_map = core::Tensor::Empty({rows, cols, 3}, core::Dtype::Float32,
depth_map.GetDevice());
NDArrayIndexer vertex_indexer(vertex_map, 2);
int64_t n = rows * cols;
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
int64_t y = workload_idx / cols;
int64_t x = workload_idx % cols;
float d = *depth_indexer.GetDataPtrFromCoord<float>(x, y);
float* vertex = vertex_indexer.GetDataPtrFromCoord<float>(x, y);
if (!isnan(d)) {
ti.Unproject(static_cast<float>(x), static_cast<float>(y),
d, vertex + 0, vertex + 1, vertex + 2);
} else {
vertex[0] = NAN;
}
});
}
void CreateNormalMapCUDA(const core::Tensor& vertex_map,
core::Tensor& normal_map) {
NDArrayIndexer vertex_indexer(vertex_map, 2);
// Output
int64_t rows = vertex_indexer.GetShape(0);
int64_t cols = vertex_indexer.GetShape(1);
normal_map =
core::Tensor::Empty(vertex_map.GetShape(), vertex_map.GetDtype(),
vertex_map.GetDevice());
NDArrayIndexer normal_indexer(normal_map, 2);
int64_t n = rows * cols;
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
int64_t y = workload_idx / cols;
int64_t x = workload_idx % cols;
float* normal = normal_indexer.GetDataPtrFromCoord<float>(x, y);
if (y < rows - 1 && x < cols - 1) {
float* v00 =
vertex_indexer.GetDataPtrFromCoord<float>(x, y);
float* v10 =
vertex_indexer.GetDataPtrFromCoord<float>(x + 1, y);
float* v01 =
vertex_indexer.GetDataPtrFromCoord<float>(x, y + 1);
if (isnan(v00[0]) || isnan(v10[0]) || isnan(v01[0])) {
normal[0] = NAN;
return;
}
float dx0 = v01[0] - v00[0];
float dy0 = v01[1] - v00[1];
float dz0 = v01[2] - v00[2];
float dx1 = v10[0] - v00[0];
float dy1 = v10[1] - v00[1];
float dz1 = v10[2] - v00[2];
normal[0] = dy0 * dz1 - dz0 * dy1;
normal[1] = dz0 * dx1 - dx0 * dz1;
normal[2] = dx0 * dy1 - dy0 * dx1;
float normal_norm =
sqrt(normal[0] * normal[0] + normal[1] * normal[1] +
normal[2] * normal[2]);
normal[0] /= normal_norm;
normal[1] /= normal_norm;
normal[2] /= normal_norm;
} else {
normal[0] = NAN;
}
});
}
void ReduceAndSolve6x6(float* A_reduction,
core::Tensor& delta,
core::Tensor& residual,
int64_t n,
const core::Device& device) {
core::Tensor output_29 =
core::Tensor::Empty({29}, core::Dtype::Float32, device);
float* output_29_data = output_29.GetDataPtr<float>();
// Reduction of {29, N} to {29}.
for (int i = 0; i < 29; i++) {
// Determine temporary device storage requirements.
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
A_reduction + i * n, output_29_data + i, n);
// Allocate temporary storage.
hipMalloc(&d_temp_storage, temp_storage_bytes);
// Run sum-reduction.
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
A_reduction + i * n, output_29_data + i, n);
hipFree(d_temp_storage);
}
DecodeAndSolve6x6(output_29, delta, residual);
}
void ComputePosePointToPlaneCUDA(const core::Tensor& source_vertex_map,
const core::Tensor& target_vertex_map,
const core::Tensor& target_normal_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
NDArrayIndexer target_vertex_indexer(target_vertex_map, 2);
NDArrayIndexer target_normal_indexer(target_normal_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target.To(device, core::Dtype::Float32);
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
const int64_t n = rows * cols;
// A_29xN is a {29, N} shaped tensor, which is later reduced to {29} where
// [0, 20] elements are used to construct {6,6} shaped symmetric AtA matrix,
// [21, 26] elements are used to construct {6} AtB matrix, element [27]
// stores residual and element [28] stores count.
core::Tensor A_29xN =
core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
float* A_reduction = A_29xN.GetDataPtr<float>();
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
float J_ij[6];
float r;
bool valid = GetJacobianPointToPlane(
workload_idx, cols, depth_diff, source_vertex_indexer,
target_vertex_indexer, target_normal_indexer, ti, J_ij,
r);
if (valid) {
for (int i = 0, j = 0; j < 6; j++) {
for (int k = 0; k <= j; k++) {
A_reduction[n * i + workload_idx] =
J_ij[j] * J_ij[k];
i++;
}
A_reduction[n * (21 + j) + workload_idx] = J_ij[j] * r;
}
A_reduction[n * 27 + workload_idx] = r * r;
A_reduction[n * 28 + workload_idx] = 1;
} else {
for (int i = 0; i < 29; i++) {
A_reduction[n * i + workload_idx] = 0;
}
}
});
ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
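// Editor's note (added for clarity): in the nested j/k loops of
// ComputePosePointToPlaneCUDA above (and the two pose functions below), i runs
// from 0 to 20 and packs the lower triangle of the symmetric 6x6 JtJ matrix
// row by row ((j,k)=(0,0)->0, (1,0)->1, (1,1)->2, ..., (5,5)->20); entries
// 21..26 hold Jtr, entry 27 the squared residual and entry 28 the inlier
// count, matching the {29} layout decoded by ReduceAndSolve6x6.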
void ComputePoseIntensityCUDA(const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target.To(device, core::Dtype::Float32);
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
const int64_t n = rows * cols;
// A_29xN is a {29, N} shaped tensor, which is later reduced to
// {29} where [0, 20] elements are used to construct {6,6} shaped symmetric
// AtA matrix, [21, 26] elements are used to construct {6} AtB matrix,
// element [27] stores residual and element [28] stores count.
core::Tensor A_29xN =
core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
float* A_reduction = A_29xN.GetDataPtr<float>();
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
float J_I[6];
float r_I;
bool valid = GetJacobianIntensity(
workload_idx, cols, depth_diff, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti,
J_I, r_I);
if (valid) {
for (int i = 0, j = 0; j < 6; j++) {
for (int k = 0; k <= j; k++) {
A_reduction[n * i + workload_idx] = J_I[j] * J_I[k];
i++;
}
A_reduction[n * (21 + j) + workload_idx] = J_I[j] * r_I;
}
A_reduction[n * 27 + workload_idx] = r_I * r_I;
A_reduction[n * 28 + workload_idx] = 1;
} else {
for (int i = 0; i < 29; i++) {
A_reduction[n * i + workload_idx] = 0;
}
}
});
ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
void ComputePoseHybridCUDA(const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_depth_dx,
const core::Tensor& target_depth_dy,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_depth_dx_indexer(target_depth_dx, 2);
NDArrayIndexer target_depth_dy_indexer(target_depth_dy, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target.To(device, core::Dtype::Float32);
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
const int64_t n = rows * cols;
// A_29xN is a {29, N} shaped tensor, which is later reduced to
// {29} where [0, 20] elements are used to construct {6,6} shaped symmetric
// AtA matrix, [21, 26] elements are used to construct {6} AtB matrix,
// element [27] stores residual and element [28] stores count.
core::Tensor A_29xN =
core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
float* A_reduction = A_29xN.GetDataPtr<float>();
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
float J_I[6], J_D[6];
float r_I, r_D;
bool valid = GetJacobianHybrid(
workload_idx, cols, depth_diff, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_depth_dx_indexer,
target_depth_dy_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti,
J_I, J_D, r_I, r_D);
if (valid) {
for (int i = 0, j = 0; j < 6; j++) {
for (int k = 0; k <= j; k++) {
A_reduction[n * i + workload_idx] =
J_I[j] * J_I[k] + J_D[j] * J_D[k];
i++;
}
A_reduction[n * (21 + j) + workload_idx] =
J_I[j] * r_I + J_D[j] * r_D;
}
A_reduction[n * 27 + workload_idx] = r_I * r_I + r_D * r_D;
A_reduction[n * 28 + workload_idx] = 1;
} else {
for (int i = 0; i < 29; i++) {
A_reduction[n * i + workload_idx] = 0;
}
}
});
ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
} // namespace odometry
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
|
32d052dc9ce54668ecfc6f9bafb133b43841415e.cu
|
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <cuda.h>
#include <cub/cub.cuh>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/CoreUtil.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/CUDALauncher.cuh"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryImpl.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryJacobianImpl.h"
#include "open3d/t/pipelines/kernel/TransformationConverter.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
namespace odometry {
void PreprocessDepthCUDA(const core::Tensor& depth,
core::Tensor& depth_processed,
float depth_scale,
float depth_max) {
NDArrayIndexer depth_in_indexer(depth, 2);
// Output
depth_processed = core::Tensor::Empty(
depth.GetShape(), core::Dtype::Float32, depth.GetDevice());
NDArrayIndexer depth_out_indexer(depth_processed, 2);
int64_t rows = depth_in_indexer.GetShape(0);
int64_t cols = depth_in_indexer.GetShape(1);
int64_t n = rows * cols;
DISPATCH_DTYPE_TO_TEMPLATE(depth.GetDtype(), [&]() {
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
int64_t y = workload_idx / cols;
int64_t x = workload_idx % cols;
float d = *depth_in_indexer.GetDataPtrFromCoord<scalar_t>(
x, y) /
depth_scale;
float* d_out_ptr =
depth_out_indexer.GetDataPtrFromCoord<float>(x, y);
bool valid = (d > 0 && d < depth_max);
*d_out_ptr = valid ? d : NAN;
});
});
}
void PyrDownDepthCUDA(const core::Tensor& depth,
core::Tensor& depth_down,
float depth_diff) {
t::geometry::kernel::NDArrayIndexer depth_indexer(depth, 2);
int rows = depth_indexer.GetShape(0);
int cols = depth_indexer.GetShape(1);
int rows_down = rows / 2;
int cols_down = cols / 2;
depth_down = core::Tensor::Empty({rows_down, cols_down},
core::Dtype::Float32, depth.GetDevice());
t::geometry::kernel::NDArrayIndexer depth_down_indexer(depth_down, 2);
int n = rows_down * cols_down;
// Gaussian filter window size
const int D = 5;
// Gaussian filter weights
const float weights[3] = {0.375f, 0.25f, 0.0625f};
// Reference:
// https://github.com/mp3guy/ICPCUDA/blob/master/Cuda/pyrdown.cu#L41
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
int y = workload_idx / cols_down;
int x = workload_idx % cols_down;
float center =
*depth_indexer.GetDataPtrFromCoord<float>(2 * x, 2 * y);
if (isnan(center)) {
*depth_down_indexer.GetDataPtrFromCoord<float>(x, y) = NAN;
return;
}
int x_min = max(0, 2 * x - D / 2) - 2 * x;
int y_min = max(0, 2 * y - D / 2) - 2 * y;
int x_max = min(cols, 2 * x - D / 2 + D) - 2 * x;
int y_max = min(rows, 2 * y - D / 2 + D) - 2 * y;
float sum = 0;
float sum_weight = 0;
for (int yi = y_min; yi < y_max; ++yi) {
for (int xi = x_min; xi < x_max; ++xi) {
float val = *depth_indexer.GetDataPtrFromCoord<float>(
2 * x + xi, 2 * y + yi);
if (!isnan(val) && abs(val - center) < depth_diff) {
sum += val * weights[abs(xi)] * weights[abs(yi)];
sum_weight += weights[abs(xi)] * weights[abs(yi)];
}
}
}
*depth_down_indexer.GetDataPtrFromCoord<float>(x, y) =
sum / sum_weight;
});
}
void CreateVertexMapCUDA(const core::Tensor& depth_map,
const core::Tensor& intrinsics,
core::Tensor& vertex_map) {
NDArrayIndexer depth_indexer(depth_map, 2);
t::geometry::kernel::TransformIndexer ti(intrinsics);
// Output
int64_t rows = depth_indexer.GetShape(0);
int64_t cols = depth_indexer.GetShape(1);
vertex_map = core::Tensor::Empty({rows, cols, 3}, core::Dtype::Float32,
depth_map.GetDevice());
NDArrayIndexer vertex_indexer(vertex_map, 2);
int64_t n = rows * cols;
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
int64_t y = workload_idx / cols;
int64_t x = workload_idx % cols;
float d = *depth_indexer.GetDataPtrFromCoord<float>(x, y);
float* vertex = vertex_indexer.GetDataPtrFromCoord<float>(x, y);
if (!isnan(d)) {
ti.Unproject(static_cast<float>(x), static_cast<float>(y),
d, vertex + 0, vertex + 1, vertex + 2);
} else {
vertex[0] = NAN;
}
});
}
void CreateNormalMapCUDA(const core::Tensor& vertex_map,
core::Tensor& normal_map) {
NDArrayIndexer vertex_indexer(vertex_map, 2);
// Output
int64_t rows = vertex_indexer.GetShape(0);
int64_t cols = vertex_indexer.GetShape(1);
normal_map =
core::Tensor::Empty(vertex_map.GetShape(), vertex_map.GetDtype(),
vertex_map.GetDevice());
NDArrayIndexer normal_indexer(normal_map, 2);
int64_t n = rows * cols;
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
int64_t y = workload_idx / cols;
int64_t x = workload_idx % cols;
float* normal = normal_indexer.GetDataPtrFromCoord<float>(x, y);
if (y < rows - 1 && x < cols - 1) {
float* v00 =
vertex_indexer.GetDataPtrFromCoord<float>(x, y);
float* v10 =
vertex_indexer.GetDataPtrFromCoord<float>(x + 1, y);
float* v01 =
vertex_indexer.GetDataPtrFromCoord<float>(x, y + 1);
if (isnan(v00[0]) || isnan(v10[0]) || isnan(v01[0])) {
normal[0] = NAN;
return;
}
float dx0 = v01[0] - v00[0];
float dy0 = v01[1] - v00[1];
float dz0 = v01[2] - v00[2];
float dx1 = v10[0] - v00[0];
float dy1 = v10[1] - v00[1];
float dz1 = v10[2] - v00[2];
normal[0] = dy0 * dz1 - dz0 * dy1;
normal[1] = dz0 * dx1 - dx0 * dz1;
normal[2] = dx0 * dy1 - dy0 * dx1;
float normal_norm =
sqrt(normal[0] * normal[0] + normal[1] * normal[1] +
normal[2] * normal[2]);
normal[0] /= normal_norm;
normal[1] /= normal_norm;
normal[2] /= normal_norm;
} else {
normal[0] = NAN;
}
});
}
void ReduceAndSolve6x6(float* A_reduction,
core::Tensor& delta,
core::Tensor& residual,
int64_t n,
const core::Device& device) {
core::Tensor output_29 =
core::Tensor::Empty({29}, core::Dtype::Float32, device);
float* output_29_data = output_29.GetDataPtr<float>();
// Reduction of {29, N} to {29}.
for (int i = 0; i < 29; i++) {
// Determine temporary device storage requirements.
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
A_reduction + i * n, output_29_data + i, n);
// Allocate temporary storage.
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// Run sum-reduction.
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
A_reduction + i * n, output_29_data + i, n);
cudaFree(d_temp_storage);
}
DecodeAndSolve6x6(output_29, delta, residual);
}
void ComputePosePointToPlaneCUDA(const core::Tensor& source_vertex_map,
const core::Tensor& target_vertex_map,
const core::Tensor& target_normal_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
NDArrayIndexer target_vertex_indexer(target_vertex_map, 2);
NDArrayIndexer target_normal_indexer(target_normal_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target.To(device, core::Dtype::Float32);
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
const int64_t n = rows * cols;
// A_29xN is a {29, N} shaped tensor, which is later reduced to {29} where
// [0, 20] elements are used to construct {6,6} shaped symmetric AtA matrix,
// [21, 26] elements are used to construct {6} AtB matrix, element [27]
// stores residual and element [28] stores count.
core::Tensor A_29xN =
core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
float* A_reduction = A_29xN.GetDataPtr<float>();
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
float J_ij[6];
float r;
bool valid = GetJacobianPointToPlane(
workload_idx, cols, depth_diff, source_vertex_indexer,
target_vertex_indexer, target_normal_indexer, ti, J_ij,
r);
if (valid) {
for (int i = 0, j = 0; j < 6; j++) {
for (int k = 0; k <= j; k++) {
A_reduction[n * i + workload_idx] =
J_ij[j] * J_ij[k];
i++;
}
A_reduction[n * (21 + j) + workload_idx] = J_ij[j] * r;
}
A_reduction[n * 27 + workload_idx] = r * r;
A_reduction[n * 28 + workload_idx] = 1;
} else {
for (int i = 0; i < 29; i++) {
A_reduction[n * i + workload_idx] = 0;
}
}
});
ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
void ComputePoseIntensityCUDA(const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target.To(device, core::Dtype::Float32);
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
const int64_t n = rows * cols;
// A_29xN is a {29, N} shaped tensor, which is later reduced to
// {29} where [0, 20] elements are used to construct {6,6} shaped symmetric
// AtA matrix, [21, 26] elements are used to construct {6} AtB matrix,
// element [27] stores residual and element [28] stores count.
core::Tensor A_29xN =
core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
float* A_reduction = A_29xN.GetDataPtr<float>();
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
float J_I[6];
float r_I;
bool valid = GetJacobianIntensity(
workload_idx, cols, depth_diff, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti,
J_I, r_I);
if (valid) {
for (int i = 0, j = 0; j < 6; j++) {
for (int k = 0; k <= j; k++) {
A_reduction[n * i + workload_idx] = J_I[j] * J_I[k];
i++;
}
A_reduction[n * (21 + j) + workload_idx] = J_I[j] * r_I;
}
A_reduction[n * 27 + workload_idx] = r_I * r_I;
A_reduction[n * 28 + workload_idx] = 1;
} else {
for (int i = 0; i < 29; i++) {
A_reduction[n * i + workload_idx] = 0;
}
}
});
ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
void ComputePoseHybridCUDA(const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_depth_dx,
const core::Tensor& target_depth_dy,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_depth_dx_indexer(target_depth_dx, 2);
NDArrayIndexer target_depth_dy_indexer(target_depth_dy, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target.To(device, core::Dtype::Float32);
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
const int64_t n = rows * cols;
// A_29xN is a {29, N} shaped tensor, which is later reduced to
// {29} where [0, 20] elements are used to construct {6,6} shaped symmetric
// AtA matrix, [21, 26] elements are used to construct {6} AtB matrix,
// element [27] stores residual and element [28] stores count.
core::Tensor A_29xN =
core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
float* A_reduction = A_29xN.GetDataPtr<float>();
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
float J_I[6], J_D[6];
float r_I, r_D;
bool valid = GetJacobianHybrid(
workload_idx, cols, depth_diff, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_depth_dx_indexer,
target_depth_dy_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti,
J_I, J_D, r_I, r_D);
if (valid) {
for (int i = 0, j = 0; j < 6; j++) {
for (int k = 0; k <= j; k++) {
A_reduction[n * i + workload_idx] =
J_I[j] * J_I[k] + J_D[j] * J_D[k];
i++;
}
A_reduction[n * (21 + j) + workload_idx] =
J_I[j] * r_I + J_D[j] * r_D;
}
A_reduction[n * 27 + workload_idx] = r_I * r_I + r_D * r_D;
A_reduction[n * 28 + workload_idx] = 1;
} else {
for (int i = 0; i < 29; i++) {
A_reduction[n * i + workload_idx] = 0;
}
}
});
ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
} // namespace odometry
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
|
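A standalone sketch (not from the file above) of the two-phase cub::DeviceReduce::Sum pattern that ReduceAndSolve6x6 applies once per row: the first call, with a null temporary-storage pointer, only reports the scratch size; the second performs the reduction. Buffer size and contents here are illustrative assumptions.

#include <cuda_runtime.h>
#include <cub/cub.cuh>
#include <cstdio>
#include <vector>

int main() {
    const int n = 1 << 20;
    std::vector<float> h_in(n, 1.0f);  // sums to n
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    void* d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    // Phase 1: with d_temp_storage == nullptr, CUB only reports how many scratch bytes it needs.
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, n);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    // Phase 2: run the actual sum-reduction.
    cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, n);

    float h_out = 0.0f;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %d)\n", h_out, n);

    cudaFree(d_temp_storage);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}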
1599f096ace2b85edbb76f27780dfcc83e7a643e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "rayCaster.hpp"
#include "mortonCodeUtil.hpp"
#include "cuda_help.hpp"
#include <hip/hip_runtime.h>
#include <cutil_math.h>
#include <iostream>
#include <fstream>
#define posToIndex(i,j,k,d) ((k)+(j)*(d)+(i)*(d)*(d))
inline __device__ bool _cuda_RayAABB(float3 origin, float3 dir, float * tnear, float * tfar, int3 minBox, int3 maxBox)
{
bool hit = true;
float tmin, tmax, tymin, tymax, tzmin, tzmax;
float divx = 1 / dir.x;
if (divx >= 0)
{
tmin = (minBox.x - origin.x)*divx;
tmax = (maxBox.x - origin.x)*divx;
}
else
{
tmin = (maxBox.x - origin.x)*divx;
tmax = (minBox.x - origin.x)*divx;
}
float divy = 1 / dir.y;
if (divy >= 0)
{
tymin = (minBox.y - origin.y)*divy;
tymax = (maxBox.y - origin.y)*divy;
}
else
{
tymin = (maxBox.y - origin.y)*divy;
tymax = (minBox.y - origin.y)*divy;
}
if ( (tmin > tymax) || (tymin > tmax) )
{
hit = false;
}
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float divz = 1 / dir.z;
if (divz >= 0)
{
tzmin = (minBox.z - origin.z)*divz;
tzmax = (maxBox.z - origin.z)*divz;
}
else
{
tzmin = (maxBox.z - origin.z)*divz;
tzmax = (minBox.z - origin.z)*divz;
}
if ( (tmin > tzmax) || (tzmin > tmax) )
{
hit = false;
}
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
if (tmin<0.0)
*tnear=0.0;
else
*tnear=tmin;
*tfar=tmax;
return hit;
}
inline __device__ float getElement(int x, int y, int z, float * data, int3 dim)
{
return data[posToIndex(x,y,z,dim.x)];
}
__device__ float getElementInterpolate(float3 pos, float * data, int3 minBox, int3 dim)
{
float3 posR;
float3 pi = make_float3(modff(pos.x,&posR.x), modff(pos.y,&posR.y), modff(pos.z,&posR.z));
int x0 = posR.x - minBox.x;
int y0 = posR.y - minBox.y;
int z0 = posR.z - minBox.z;
int x1 = x0 + 1;
int y1 = y0 + 1;
int z1 = z0 + 1;
float c00 = getElement(x0,y0,z0,data,dim) * (1.0f - pi.x) + getElement(x1,y0,z0,data,dim) * pi.x;
float c01 = getElement(x0,y0,z1,data,dim) * (1.0f - pi.x) + getElement(x1,y0,z1,data,dim) * pi.x;
float c10 = getElement(x0,y1,z0,data,dim) * (1.0f - pi.x) + getElement(x1,y1,z0,data,dim) * pi.x;
float c11 = getElement(x0,y1,z1,data,dim) * (1.0f - pi.x) + getElement(x1,y1,z1,data,dim) * pi.x;
float c0 = c00 * (1.0f - pi.y) + c10 * pi.y;
float c1 = c01 * (1.0f - pi.y) + c11 * pi.y;
return c0 * (1.0f - pi.z) + c1 * pi.z;
#if 0
float p000 = getElement(x0,y0,z1,data,dim);
float p001 = getElement(x0,y1,z1,data,dim);
float p010 = getElement(x0,y0,z0,data,dim);
float p011 = getElement(x0,y1,z0,data,dim);
float p100 = getElement(x1,y0,z1,data,dim);
float p101 = getElement(x1,y1,z1,data,dim);
float p110 = getElement(x1,y0,z0,data,dim);
float p111 = getElement(x1,y1,z0,data,dim);
// float3 pi = make_float3(modff(posR.x), modff(posR.y-(float)y0, posR.z-(float)z0);
return p000 * (1.0-pi.x) * (1.0-pi.y) * (1.0-pi.z) + \
p001 * (1.0-pi.x) * (1.0-pi.y) * pi.z + \
p010 * (1.0-pi.x) * pi.y * (1.0-pi.z) + \
p011 * (1.0-pi.x) * pi.y * pi.z + \
p100 * pi.x * (1.0-pi.y) * (1.0-pi.z) + \
p101 * pi.x * (1.0-pi.y) * pi.z + \
p110 * pi.x * pi.y * (1.0-pi.z) + \
p111 * pi.x * pi.y * pi.z;
#endif
}
inline __device__ float3 getNormal(float3 pos, float * data, int3 minBox, int3 maxBox)
{
return normalize(make_float3(
(getElementInterpolate(make_float3(pos.x-1.0f,pos.y,pos.z),data,minBox,maxBox) - getElementInterpolate(make_float3(pos.x+1.0f,pos.y,pos.z),data,minBox,maxBox)) /2.0f,
(getElementInterpolate(make_float3(pos.x,pos.y-1.0f,pos.z),data,minBox,maxBox) - getElementInterpolate(make_float3(pos.x,pos.y+1.0f,pos.z),data,minBox,maxBox)) /2.0f,
(getElementInterpolate(make_float3(pos.x,pos.y,pos.z-1.0f),data,minBox,maxBox) - getElementInterpolate(make_float3(pos.x,pos.y,pos.z+1.0f),data,minBox,maxBox)) /2.0f));
}
__global__ void cuda_rayCaster(int numRays, float3 ligth, float3 origin, float * rays, float iso, visibleCube_t * cube, int3 dimCube, int3 cubeInc, int levelO, int levelC, int nLevel, float * screen)
{
unsigned int tid = blockIdx.y * blockDim.x * gridDim.y + blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numRays)
{
if (cube[tid].state == NOCUBE)
{
screen[tid*3] = 1.0f;
screen[tid*3+1] = 1.0f;
screen[tid*3+2] = 1.0f;
cube[tid].state = PAINTED;
return;
}
else if (cube[tid].state == CACHED)
{
float tnear;
float tfar;
// TODO: test intersection against the real cube position
int3 minBox = getMinBoxIndex2(cube[tid].id, levelO, nLevel);
int dim = powf(2,nLevel-levelO);
int3 maxBox = minBox + make_int3(dim,dim,dim);
float3 ray = make_float3(rays[tid], rays[tid+numRays], rays[tid+2*numRays]);
if (_cuda_RayAABB(origin, ray, &tnear, &tfar, minBox, maxBox))
{
bool hit = false;
float3 Xnear;
float3 Xfar;
float3 Xnew;
// The ray caster needs a bigger cube, so add cube inc
minBox = getMinBoxIndex2(cube[tid].cubeID, levelC, nLevel) - cubeInc;
maxBox = dimCube + 2*cubeInc;
Xnear = origin + tnear * ray;
Xfar = Xnear;
Xnew = Xnear;
bool primera = true;
float ant = 0.0;
float sig = 0.0;
int steps = 0;
float3 vStep = 0.5* ray;
int maxStep = ceil((tfar-tnear)/0.5);
/* CASES TO STUDY
tnear==tfar MISS
tfar<tnear MISS
tfar-tnear < step STUDY BETWEEN POINTS
*/
while(steps <= maxStep)
{
if (primera)
{
primera = false;
ant = getElementInterpolate(Xnear, cube[tid].data, minBox, maxBox);
Xfar = Xnear;
}
else
{
sig = getElementInterpolate(Xnear, cube[tid].data, minBox, maxBox);
if (( ((iso-ant)<0.0) && ((iso-sig)<0.0)) || ( ((iso-ant)>0.0) && ((iso-sig)>0.0)))
{
ant = sig;
Xfar=Xnear;
}
else
{
/*
If the values at the endpoints are V_s and V_f and the value you are looking for (the isosurface value) is V, with S the start point and F the end point:
a = (V - V_s) / (V_f - V_s)
I = S * (1 - a) + F * a (I think I gave you this formula the wrong way round in the color case, double-check it)
*/
#if 0
// Intersection Refinement using an iterative bisection procedure
float valueE = 0.0;
for(int k = 0; k<5;k++) // XXX The larger this is, the better the rendering should be
{
Xnew = (Xfar - Xnear)*((iso-sig)/(ant-sig))+Xnear;
valueE = getElementInterpolate(Xnew, cube[tid].data, minBox, maxBox);
if (valueE>iso)
Xnear=Xnew;
else
Xfar=Xnew;
}
#endif
float a = (iso-ant)/(sig-ant);
Xnew = Xfar*(1.0f-a)+ Xnear*a;
hit = true;
steps = maxStep;
}
}
Xnear += vStep;
steps++;
}
if (hit)
{
float3 n = getNormal(Xnew, cube[tid].data, minBox, maxBox);
float3 l = Xnew - ligth;
l = normalize(l);
float dif = fabs(n.x*l.x + n.y*l.y + n.z*l.z);
float a = Xnew.y/256.0f;
screen[tid*3] =(1-a)*dif;// + 1.0f*spec;
screen[tid*3+1] =(a)*dif;// + 1.0f*spec;
screen[tid*3+2] =0.0f*dif;// + 1.0f*spec;
cube[tid].state= PAINTED;
}
else
{
cube[tid].state = NOCUBE;
}
}
#if _DEBUG_
else
{
printf("Error, octree is not working %lld %d \n",cube[tid].id, getIndexLevel(cube[tid].id));
}
#endif
}
}
}
/*
******************************************************************************************************
************ rayCaster methods ***************************************************************************
******************************************************************************************************
*/
rayCaster::rayCaster(float isosurface, rayCaster_options_t * options)
{
iso = isosurface;
lightPosition = options->ligth_position;
step = 0.5f;
}
rayCaster::~rayCaster()
{
}
void rayCaster::increaseStep()
{
step += 0.01f;
}
void rayCaster::decreaseStep()
{
step -= step == 0.01f ? 0.0f : 0.01f;
}
void rayCaster::render(float * rays, int numRays, float3 camera_position, int levelO, int levelC, int nLevel, visibleCube_t * cube, int3 cubeDim, int3 cubeInc, float * pixelBuffer, hipStream_t stream)
{
dim3 threads = getThreads(numRays);
dim3 blocks = getBlocks(numRays);
// std::cerr<<"Launching kernek blocks ("<<blocks.x<<","<<blocks.y<<","<<blocks.z<<") threads ("<<threads.x<<","<<threads.y<<","<<threads.z<<") error: "<< hipGetErrorString(hipGetLastError())<<std::endl;
hipLaunchKernelGGL(( cuda_rayCaster), dim3(blocks), dim3(threads), 0, stream, numRays, lightPosition, camera_position, rays, iso, cube, cubeDim, cubeInc, levelO, levelC, nLevel, pixelBuffer);
// std::cerr<<"Synchronizing rayCaster: " << hipGetErrorString(hipDeviceSynchronize()) << std::endl;
return;
}
|
1599f096ace2b85edbb76f27780dfcc83e7a643e.cu
|
#include "rayCaster.hpp"
#include "mortonCodeUtil.hpp"
#include "cuda_help.hpp"
#include <cuda_runtime.h>
#include <cutil_math.h>
#include <iostream>
#include <fstream>
#define posToIndex(i,j,k,d) ((k)+(j)*(d)+(i)*(d)*(d))
inline __device__ bool _cuda_RayAABB(float3 origin, float3 dir, float * tnear, float * tfar, int3 minBox, int3 maxBox)
{
bool hit = true;
float tmin, tmax, tymin, tymax, tzmin, tzmax;
float divx = 1 / dir.x;
if (divx >= 0)
{
tmin = (minBox.x - origin.x)*divx;
tmax = (maxBox.x - origin.x)*divx;
}
else
{
tmin = (maxBox.x - origin.x)*divx;
tmax = (minBox.x - origin.x)*divx;
}
float divy = 1 / dir.y;
if (divy >= 0)
{
tymin = (minBox.y - origin.y)*divy;
tymax = (maxBox.y - origin.y)*divy;
}
else
{
tymin = (maxBox.y - origin.y)*divy;
tymax = (minBox.y - origin.y)*divy;
}
if ( (tmin > tymax) || (tymin > tmax) )
{
hit = false;
}
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float divz = 1 / dir.z;
if (divz >= 0)
{
tzmin = (minBox.z - origin.z)*divz;
tzmax = (maxBox.z - origin.z)*divz;
}
else
{
tzmin = (maxBox.z - origin.z)*divz;
tzmax = (minBox.z - origin.z)*divz;
}
if ( (tmin > tzmax) || (tzmin > tmax) )
{
hit = false;
}
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
if (tmin<0.0)
*tnear=0.0;
else
*tnear=tmin;
*tfar=tmax;
return hit;
}
inline __device__ float getElement(int x, int y, int z, float * data, int3 dim)
{
return data[posToIndex(x,y,z,dim.x)];
}
__device__ float getElementInterpolate(float3 pos, float * data, int3 minBox, int3 dim)
{
float3 posR;
float3 pi = make_float3(modff(pos.x,&posR.x), modff(pos.y,&posR.y), modff(pos.z,&posR.z));
int x0 = posR.x - minBox.x;
int y0 = posR.y - minBox.y;
int z0 = posR.z - minBox.z;
int x1 = x0 + 1;
int y1 = y0 + 1;
int z1 = z0 + 1;
float c00 = getElement(x0,y0,z0,data,dim) * (1.0f - pi.x) + getElement(x1,y0,z0,data,dim) * pi.x;
float c01 = getElement(x0,y0,z1,data,dim) * (1.0f - pi.x) + getElement(x1,y0,z1,data,dim) * pi.x;
float c10 = getElement(x0,y1,z0,data,dim) * (1.0f - pi.x) + getElement(x1,y1,z0,data,dim) * pi.x;
float c11 = getElement(x0,y1,z1,data,dim) * (1.0f - pi.x) + getElement(x1,y1,z1,data,dim) * pi.x;
float c0 = c00 * (1.0f - pi.y) + c10 * pi.y;
float c1 = c01 * (1.0f - pi.y) + c11 * pi.y;
return c0 * (1.0f - pi.z) + c1 * pi.z;
#if 0
float p000 = getElement(x0,y0,z1,data,dim);
float p001 = getElement(x0,y1,z1,data,dim);
float p010 = getElement(x0,y0,z0,data,dim);
float p011 = getElement(x0,y1,z0,data,dim);
float p100 = getElement(x1,y0,z1,data,dim);
float p101 = getElement(x1,y1,z1,data,dim);
float p110 = getElement(x1,y0,z0,data,dim);
float p111 = getElement(x1,y1,z0,data,dim);
// float3 pi = make_float3(modff(posR.x), modff(posR.y-(float)y0, posR.z-(float)z0);
return p000 * (1.0-pi.x) * (1.0-pi.y) * (1.0-pi.z) + \
p001 * (1.0-pi.x) * (1.0-pi.y) * pi.z + \
p010 * (1.0-pi.x) * pi.y * (1.0-pi.z) + \
p011 * (1.0-pi.x) * pi.y * pi.z + \
p100 * pi.x * (1.0-pi.y) * (1.0-pi.z) + \
p101 * pi.x * (1.0-pi.y) * pi.z + \
p110 * pi.x * pi.y * (1.0-pi.z) + \
p111 * pi.x * pi.y * pi.z;
#endif
}
inline __device__ float3 getNormal(float3 pos, float * data, int3 minBox, int3 maxBox)
{
return normalize(make_float3(
(getElementInterpolate(make_float3(pos.x-1.0f,pos.y,pos.z),data,minBox,maxBox) - getElementInterpolate(make_float3(pos.x+1.0f,pos.y,pos.z),data,minBox,maxBox)) /2.0f,
(getElementInterpolate(make_float3(pos.x,pos.y-1.0f,pos.z),data,minBox,maxBox) - getElementInterpolate(make_float3(pos.x,pos.y+1.0f,pos.z),data,minBox,maxBox)) /2.0f,
(getElementInterpolate(make_float3(pos.x,pos.y,pos.z-1.0f),data,minBox,maxBox) - getElementInterpolate(make_float3(pos.x,pos.y,pos.z+1.0f),data,minBox,maxBox)) /2.0f));
}
__global__ void cuda_rayCaster(int numRays, float3 ligth, float3 origin, float * rays, float iso, visibleCube_t * cube, int3 dimCube, int3 cubeInc, int levelO, int levelC, int nLevel, float * screen)
{
unsigned int tid = blockIdx.y * blockDim.x * gridDim.y + blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numRays)
{
if (cube[tid].state == NOCUBE)
{
screen[tid*3] = 1.0f;
screen[tid*3+1] = 1.0f;
screen[tid*3+2] = 1.0f;
cube[tid].state = PAINTED;
return;
}
else if (cube[tid].state == CACHED)
{
float tnear;
float tfar;
// TODO: test intersection against the real cube position
int3 minBox = getMinBoxIndex2(cube[tid].id, levelO, nLevel);
int dim = powf(2,nLevel-levelO);
int3 maxBox = minBox + make_int3(dim,dim,dim);
float3 ray = make_float3(rays[tid], rays[tid+numRays], rays[tid+2*numRays]);
if (_cuda_RayAABB(origin, ray, &tnear, &tfar, minBox, maxBox))
{
bool hit = false;
float3 Xnear;
float3 Xfar;
float3 Xnew;
// The ray caster needs a bigger cube, so add cube inc
minBox = getMinBoxIndex2(cube[tid].cubeID, levelC, nLevel) - cubeInc;
maxBox = dimCube + 2*cubeInc;
Xnear = origin + tnear * ray;
Xfar = Xnear;
Xnew = Xnear;
bool primera = true;
float ant = 0.0;
float sig = 0.0;
int steps = 0;
float3 vStep = 0.5* ray;
int maxStep = ceil((tfar-tnear)/0.5);
/* CASES TO STUDY
tnear==tfar MISS
tfar<tnear MISS
tfar-tnear < step STUDY BETWEEN POINTS
*/
while(steps <= maxStep)
{
if (primera)
{
primera = false;
ant = getElementInterpolate(Xnear, cube[tid].data, minBox, maxBox);
Xfar = Xnear;
}
else
{
sig = getElementInterpolate(Xnear, cube[tid].data, minBox, maxBox);
if (( ((iso-ant)<0.0) && ((iso-sig)<0.0)) || ( ((iso-ant)>0.0) && ((iso-sig)>0.0)))
{
ant = sig;
Xfar=Xnear;
}
else
{
/*
If the values at the endpoints are V_s and V_f and the value you are looking for (the isosurface value) is V, with S the start point and F the end point:
a = (V - V_s) / (V_f - V_s)
I = S * (1 - a) + F * a (I think I gave you this formula the wrong way round in the color case, double-check it)
*/
#if 0
// Intersection Refinement using an iterative bisection procedure
float valueE = 0.0;
for(int k = 0; k<5;k++) // XXX The larger this is, the better the rendering should be
{
Xnew = (Xfar - Xnear)*((iso-sig)/(ant-sig))+Xnear;
valueE = getElementInterpolate(Xnew, cube[tid].data, minBox, maxBox);
if (valueE>iso)
Xnear=Xnew;
else
Xfar=Xnew;
}
#endif
float a = (iso-ant)/(sig-ant);
Xnew = Xfar*(1.0f-a)+ Xnear*a;
hit = true;
steps = maxStep;
}
}
Xnear += vStep;
steps++;
}
if (hit)
{
float3 n = getNormal(Xnew, cube[tid].data, minBox, maxBox);
float3 l = Xnew - ligth;
l = normalize(l);
float dif = fabs(n.x*l.x + n.y*l.y + n.z*l.z);
float a = Xnew.y/256.0f;
screen[tid*3] =(1-a)*dif;// + 1.0f*spec;
screen[tid*3+1] =(a)*dif;// + 1.0f*spec;
screen[tid*3+2] =0.0f*dif;// + 1.0f*spec;
cube[tid].state= PAINTED;
}
else
{
cube[tid].state = NOCUBE;
}
}
#if _DEBUG_
else
{
printf("Error, octree is not working %lld %d \n",cube[tid].id, getIndexLevel(cube[tid].id));
}
#endif
}
}
}
/*
******************************************************************************************************
************ rayCaster methods ***************************************************************************
******************************************************************************************************
*/
rayCaster::rayCaster(float isosurface, rayCaster_options_t * options)
{
iso = isosurface;
lightPosition = options->ligth_position;
step = 0.5f;
}
rayCaster::~rayCaster()
{
}
void rayCaster::increaseStep()
{
step += 0.01f;
}
void rayCaster::decreaseStep()
{
step -= step == 0.01f ? 0.0f : 0.01f;
}
void rayCaster::render(float * rays, int numRays, float3 camera_position, int levelO, int levelC, int nLevel, visibleCube_t * cube, int3 cubeDim, int3 cubeInc, float * pixelBuffer, cudaStream_t stream)
{
dim3 threads = getThreads(numRays);
dim3 blocks = getBlocks(numRays);
// std::cerr<<"Launching kernek blocks ("<<blocks.x<<","<<blocks.y<<","<<blocks.z<<") threads ("<<threads.x<<","<<threads.y<<","<<threads.z<<") error: "<< cudaGetErrorString(cudaGetLastError())<<std::endl;
cuda_rayCaster<<<blocks, threads, 0, stream>>>(numRays, lightPosition, camera_position, rays, iso, cube, cubeDim, cubeInc, levelO, levelC, nLevel, pixelBuffer);
// std::cerr<<"Synchronizing rayCaster: " << cudaGetErrorString(cudaDeviceSynchronize()) << std::endl;
return;
}
|
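A small host-side sketch (not from the files above) of the linear isosurface-crossing estimate described in the comment inside cuda_rayCaster: given two consecutive samples ant and sig that bracket the iso value at positions Xfar and Xnear, the crossing point is Xfar*(1-a) + Xnear*a with a = (iso-ant)/(sig-ant). The sample values and positions below are made up for illustration.

#include <cstdio>

struct Vec3 { float x, y, z; };

// Linear interpolation between two positions: a*(b - a_pos) style blend.
static Vec3 lerp(Vec3 a, Vec3 b, float t) {
    return { a.x * (1.0f - t) + b.x * t,
             a.y * (1.0f - t) + b.y * t,
             a.z * (1.0f - t) + b.z * t };
}

int main() {
    float iso = 0.5f;
    float ant = 0.2f, sig = 0.8f;          // samples bracketing the isovalue
    Vec3 Xfar  = {1.0f, 2.0f, 3.0f};       // position of the 'ant' sample
    Vec3 Xnear = {1.5f, 2.0f, 3.0f};       // position of the 'sig' sample
    float a = (iso - ant) / (sig - ant);   // fraction of the step where the crossing occurs
    Vec3 Xnew = lerp(Xfar, Xnear, a);      // Xnew = Xfar*(1-a) + Xnear*a, as in the kernel
    printf("crossing at (%f, %f, %f)\n", Xnew.x, Xnew.y, Xnew.z);
    return 0;
}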
4ce75d2d5b0ce72b8299fae9d35918e45cf4c685.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define TB 128
#define GS(x) (((x) - 1) / TB + 1)
__global__ void mul_(float *input, float factor, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
input[id] = input[id] * factor;
}
}
|
4ce75d2d5b0ce72b8299fae9d35918e45cf4c685.cu
|
#include "includes.h"
#define TB 128
#define GS(x) (((x) - 1) / TB + 1)
__global__ void mul_(float *input, float factor, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
input[id] = input[id] * factor;
}
}
|
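A minimal usage sketch for the mul_ kernel above (the host code is not part of the original file): GS(x) is a ceiling division, so GS(size) blocks of TB threads cover all elements even when size is not a multiple of TB. The buffer size and scale factor are illustrative assumptions.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

#define TB 128
#define GS(x) (((x) - 1) / TB + 1)

__global__ void mul_(float *input, float factor, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size) {
        input[id] = input[id] * factor;
    }
}

int main() {
    const int size = 1000;                 // deliberately not a multiple of TB
    std::vector<float> h(size, 2.0f);
    float *d = nullptr;
    cudaMalloc(&d, size * sizeof(float));
    cudaMemcpy(d, h.data(), size * sizeof(float), cudaMemcpyHostToDevice);
    mul_<<<GS(size), TB>>>(d, 3.0f, size); // GS(1000) == 8 blocks of 128 threads
    cudaMemcpy(h.data(), d, size * sizeof(float), cudaMemcpyDeviceToHost);
    printf("h[0] = %f, h[999] = %f\n", h[0], h[size - 1]);  // both 6.0
    cudaFree(d);
    return 0;
}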
f1389861aa3dd2b3994c983684895e6ffc7aa2f9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "include/common.h"
#include "include/job_base.h"
#include <bits/floatn.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/driver_types.h>
#include <include/linked_list.h>
#include <include/machine_base.h>
#include <include/chromosome_base.h>
#include <tests/include/test_chromosome_base.h>
#include <tests/include/test_machine_base.h>
#include <tests/include/def.h>
size_t memory_usage = 0;
hipError_t test_cudaMalloc(void ** ptr, size_t size){
memory_usage += size;
return hipMalloc(ptr, size);
}
#define hipMalloc test_cudaMalloc
extern int JOB_AMOUNT;
extern int MACHINE_AMOUNT;
extern int CHROMOSOME_AMOUNT;
extern int GENERATIONS;
class TestChromosomeBaseDevice : public testing::Test{
public:
Machine ** machines;
Machine ** address_machines_arr;
job_t ** jobs;
job_t ** address_jobs_arr;
Chromosome * chromosomes;
process_time_t **processTimes;
process_time_t **address_process_time_arr;
double *genes;
double *host_genes;
unsigned int *device_can_run_machine_size;
unsigned int *host_can_run_machine_size;
size_t gene_size;
list_operations_t *ops;
machine_base_operations_t *mbops;
job_base_operations_t *jbops;
int R_JOB_AMOUNT;
int R_MACHINE_AMOUNT;
int R_CHROMOSOME_AMOUNT;
void random_shuffle(double *genes, size_t size);
void SetUp() override;
void TearDown() override;
};
void TestChromosomeBaseDevice::random_shuffle(double *genes, size_t size){
for(unsigned int i = 0; i < size; ++i){
genes[i] = (double)rand() / (double)RAND_MAX;
}
}
void TestChromosomeBaseDevice::SetUp(){
R_JOB_AMOUNT = JOB_AMOUNT * (CHROMOSOME_AMOUNT<<1);
R_MACHINE_AMOUNT = MACHINE_AMOUNT * (CHROMOSOME_AMOUNT<<1);
R_CHROMOSOME_AMOUNT = CHROMOSOME_AMOUNT << 1;
// allocating jobs
cudaCheck(hipMalloc((void**)&jobs, sizeof(job_t*) * R_CHROMOSOME_AMOUNT), "allocating jobs...");
cudaCheck(hipHostMalloc((void**)&address_jobs_arr, sizeof(job_t*) * R_CHROMOSOME_AMOUNT), "allocating host address_job_arr");
job_t * tmp;
for(int i = 0; i < R_CHROMOSOME_AMOUNT; ++i){
cudaCheck(hipMalloc((void**)&tmp, sizeof(job_t) * JOB_AMOUNT), "allocating jobs for a chromosome");
address_jobs_arr[i] = tmp;
}
cudaCheck(hipMemcpy(jobs, address_jobs_arr, sizeof(job_t*) * R_CHROMOSOME_AMOUNT, hipMemcpyHostToDevice), "copy jobs from host to device");
// allocating machines
cudaCheck( hipMalloc((void**)&machines, sizeof(Machine*)*R_CHROMOSOME_AMOUNT), "allocating machines...");
cudaCheck( hipHostMalloc((void**)&address_machines_arr, sizeof(Machine*)*R_CHROMOSOME_AMOUNT), "allocating host address_machines_arr");
Machine *machines_tmp;
for(int i = 0; i < R_CHROMOSOME_AMOUNT; ++i){
cudaCheck( hipMalloc((void**)&machines_tmp, sizeof(Machine)*MACHINE_AMOUNT), "allocating machines for a chromosome");
address_machines_arr[i] = machines_tmp;
}
cudaCheck( hipMemcpy(machines, address_machines_arr, sizeof(Machine*)*R_CHROMOSOME_AMOUNT, hipMemcpyHostToDevice), "copy machines from host to device");
// allocating chromosomes
cudaCheck( hipMalloc((void**)&chromosomes, sizeof(Chromosome)*R_CHROMOSOME_AMOUNT), "allocating chromosomes");
// prepare host_can_run_machine_size
cudaCheck( hipHostMalloc((void**)&host_can_run_machine_size, sizeof(unsigned int)*JOB_AMOUNT), "allocating host_can_run_machine_size on host");
cudaCheck( hipMalloc((void**)&device_can_run_machine_size, sizeof(unsigned int)*JOB_AMOUNT), "allocating device_can_run_machines_size on device");
for(int i = 0; i < JOB_AMOUNT; ++i){
host_can_run_machine_size[i] = rand() % 200 + 400;
}
cudaCheck(hipMemcpy(device_can_run_machine_size, host_can_run_machine_size, sizeof(unsigned int)*JOB_AMOUNT, hipMemcpyHostToDevice), "copy can run tool");
// prepare process_time
cudaCheck( hipHostMalloc((void**)&address_process_time_arr, sizeof(process_time_t *)*JOB_AMOUNT), "allocating process time on host");
cudaCheck( hipMalloc((void**)&processTimes, sizeof(process_time_t *)*JOB_AMOUNT), "allocating process time on device");
process_time_t *process_time_tmp_host;
process_time_t *process_time_tmp;
for(int i = 0; i < JOB_AMOUNT; ++i){
cudaCheck(hipMalloc((void**)&process_time_tmp, sizeof(process_time_t) * host_can_run_machine_size[i]), "allocating process time on device");
cudaCheck(hipHostMalloc((void**)&process_time_tmp_host, sizeof(process_time_t) * host_can_run_machine_size[i]), "allocating process time on host");
for(unsigned int j = 0; j < host_can_run_machine_size[i]; ++j){
process_time_tmp_host[j].machine_no = rand() % MACHINE_AMOUNT;
process_time_tmp_host[j].process_time = rand() % 1000;
}
cudaCheck(hipMemcpy(process_time_tmp, process_time_tmp_host, sizeof(process_time_t) * host_can_run_machine_size[i], hipMemcpyHostToDevice), "copy process time from host to device");
cudaCheck(hipHostFree(process_time_tmp_host), "cuda free process_time_tmp_host");
address_process_time_arr[i] = process_time_tmp;
}
cudaCheck( hipMemcpy(processTimes, address_process_time_arr, sizeof(process_time_t *)*JOB_AMOUNT, hipMemcpyHostToDevice), "copy can run tool from host to device");
// alloc genes
cudaCheck(hipMalloc((void**)&genes, sizeof(double)*(JOB_AMOUNT<<1)*(CHROMOSOME_AMOUNT<<1)),"cuda alloc genes");
cudaCheck(hipHostMalloc((void**)&host_genes, sizeof(double)*(JOB_AMOUNT<<1)*(CHROMOSOME_AMOUNT<<1)),"cuda malloc host_genes");
// alloc ops
cudaCheck(hipMalloc((void**)&ops, sizeof(list_operations_t)), "alloc ops");
cudaCheck(hipMalloc((void**)&mbops, sizeof(machine_base_operations_t)), "alloc mbops");
cudaCheck(hipMalloc((void**)&jbops, sizeof(job_base_operations_t)), "alloc jbops");
}
void TestChromosomeBaseDevice::TearDown(){
// free ops
cudaCheck(hipFree(ops), "Free ops...");
cudaCheck(hipFree(mbops), "Free mbops");
cudaCheck(hipFree(jbops), "Free jbops");
// free jobs
cudaCheck(hipFree(jobs), "Free jobs");
for(int i = 0; i < R_CHROMOSOME_AMOUNT; ++i){
cudaCheck(hipFree(address_jobs_arr[i]), "Free an array of jobs");
}
cudaCheck( hipHostFree(address_jobs_arr), "Free address_job_arr");
// free machines
cudaCheck(hipFree(machines), "Free machines");
for(int i = 0; i < R_CHROMOSOME_AMOUNT; ++i){
cudaCheck(hipFree(address_machines_arr[i]), "Free an array of machines");
}
cudaCheck(hipHostFree(address_machines_arr), "Free address_machines_arr");
// free can_run_machine
cudaCheck(hipFree(device_can_run_machine_size), "Free device_can_run_machine_size");
cudaCheck(hipHostFree(host_can_run_machine_size), "Free host_can_run_machine_size");
// free chromosomes
cudaCheck(hipFree(chromosomes), "Free chromosomes");
// free process time
cudaCheck(hipFree(processTimes), "Free processTimes");
for(int i = 0; i < JOB_AMOUNT; ++i){
cudaCheck(hipFree(address_process_time_arr[i]), "Free an array of process_time");
}
cudaCheck(hipHostFree(address_process_time_arr), "Free address_process_time_arr");
}
__global__ void machineSetup(Machine **machines, int MACHINE_AMOUNT, int CHROMOSOME_AMOUNT){
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if(x < CHROMOSOME_AMOUNT && y < MACHINE_AMOUNT){
// machines[x][y].base.init = initMachineBase;
initMachine(&machines[x][y]);
}
}
__global__ void chromosomeSetup(Chromosome *chromosomes, double * genes, int JOB_AMOUNT, int CHROMOSOME_AMOUNT){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < CHROMOSOME_AMOUNT){
chromosomes[idx].val = idx;
chromosomes[idx].base.gene_size = JOB_AMOUNT<<1;
chromosomes[idx].base.chromosome_no = idx;
initChromosomeBase(&chromosomes[idx].base, genes + idx*(JOB_AMOUNT<<1));
}
}
__global__ void jobSetup(job_t ** jobs, unsigned int *can_run_tool_size, process_time_t ** process_times, job_base_operations_t *ops, int JOB_AMOUNT, int CHROMOSOME_AMOUNT){
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if(x < CHROMOSOME_AMOUNT && y < JOB_AMOUNT){
initJob(&jobs[x][y]);
jobs[x][y].base.job_no = y;
ops->setProcessTime(&jobs[x][y].base, process_times[y], can_run_tool_size[y]);
// jobs[x][y].base.setProcessTime(&jobs[x][y].base, process_times[y], can_run_tool_size[y]);
}
}
__global__ void jobBindGenes(job_t **jobs, Chromosome * chromosomes, job_base_operations_t *jbops, int JOB_AMOUNT, int R_CHROMOSOME_AMOUNT){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x < R_CHROMOSOME_AMOUNT && y < JOB_AMOUNT){
jbops->setMsGenePointer(&jobs[x][y].base, chromosomes[x].base.ms_genes + y);
jbops->setOsSeqGenePointer(&jobs[x][y].base, chromosomes[x].base.os_genes + y);
}
}
__global__ void machineSelection(job_t **jobs, job_base_operations_t *jbops, int JOB_AMOUNT, int R_CHROMOSOME_AMOUNT){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int machine_idx;
if(x < R_CHROMOSOME_AMOUNT && y < JOB_AMOUNT){
machine_idx = jbops->machineSelection(&jobs[x][y].base);
jobs[x][y].base.machine_no = jobs[x][y].base.process_time[machine_idx].machine_no;
}
}
__global__ void machineSelection2(job_t **jobs, Machine **machines, machine_base_operations_t *ops, int JOB_AMOUNT, int MACHINE_AMOUNT, int R_CHROMOSOME_AMOUNT){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < R_CHROMOSOME_AMOUNT && y < MACHINE_AMOUNT){
for(int i = 0; i < JOB_AMOUNT; ++i){
if(jobs[x][i].base.machine_no == y){
ops->addJob(&machines[x][y].base, &jobs[x][i].ele);
// machines[x][y].base.addJob(&machines[x][y].base, &jobs[x][i]);
}
}
}
}
__global__ void sortJob(Machine **machines, list_operations_t *ops, int MACHINE_AMOUNT, int R_CHROMOSOME_AMOUNT){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x == 0 && y == 0){
__sortJob(&machines[x][y].base, ops);
}
}
__global__ void operationSetup(list_operations_t *ops,job_base_operations_t *jbops, machine_base_operations_t *mbops){
ops->init = initList;
ops->setNext = __listEleSetNext;
ops->setPrev = __listEleSetPrev;
machine_base_operations_t mbtmp = MACHINE_BASE_OPS;
*mbops = mbtmp;
job_base_operations_t jbtmp = JOB_BASE_OPS;
*jbops = jbtmp;
}
TEST_F(TestChromosomeBaseDevice, test_chromosome_base_device){
// setup grid dimension
dim3 machine_chromosome_thread(2, 512);
dim3 machine_chromosome_block(R_CHROMOSOME_AMOUNT >> 1, MACHINE_AMOUNT >> 8);
dim3 job_chromosome_thread(2, 512);
dim3 job_chromosome_block(R_CHROMOSOME_AMOUNT >> 1, JOB_AMOUNT >> 8); // (R_CHROMOSOME_AMOUNT / 32, )
// setup kernel
hipLaunchKernelGGL(( operationSetup), dim3(1), dim3(1), 0, 0, ops, jbops, mbops);
hipLaunchKernelGGL(( jobSetup), dim3(job_chromosome_block), dim3(job_chromosome_thread), 0, 0, jobs, device_can_run_machine_size, processTimes, jbops, JOB_AMOUNT, R_CHROMOSOME_AMOUNT);
hipLaunchKernelGGL(( machineSetup), dim3(machine_chromosome_block), dim3(machine_chromosome_thread), 0, 0, machines, MACHINE_AMOUNT, R_CHROMOSOME_AMOUNT);
hipLaunchKernelGGL(( chromosomeSetup), dim3(100), dim3(100), 0, 0, chromosomes, genes, JOB_AMOUNT, R_CHROMOSOME_AMOUNT);
hipLaunchKernelGGL(( jobBindGenes), dim3(job_chromosome_block), dim3(job_chromosome_thread), 0, 0, jobs, chromosomes, jbops, JOB_AMOUNT, R_CHROMOSOME_AMOUNT);
hipDeviceSynchronize();
PRINTF("Device Memory Usage = %lu\n", memory_usage);
hipEvent_t startEvent, stopEvent;
cudaCheck(hipEventCreate(&startEvent), "create start event");
cudaCheck(hipEventCreate(&stopEvent), "create stop event");
// start computing...
PRINTF("Start Computing...\n");
cudaCheck(hipEventRecord(startEvent, 0), "cuda event record start event");
hipLaunchKernelGGL(( machineSelection), dim3(job_chromosome_block), dim3(job_chromosome_thread), 0, 0, jobs, jbops, JOB_AMOUNT, R_CHROMOSOME_AMOUNT); // machine selection
PRINTF("Finish machine selection part 1\n");
PRINTF("Start machine selection part2\n");
hipLaunchKernelGGL(( machineSelection2), dim3(machine_chromosome_block), dim3(machine_chromosome_thread), 0, 0, jobs, machines, mbops, JOB_AMOUNT, MACHINE_AMOUNT, R_CHROMOSOME_AMOUNT);
PRINTF("Finish machine selection part2\n");
hipLaunchKernelGGL(( sortJob), dim3(machine_chromosome_block), dim3(machine_chromosome_thread), 0, 0, machines, ops, MACHINE_AMOUNT, R_CHROMOSOME_AMOUNT);
hipDeviceSynchronize();
PRINTF("Finish sorting\n");
cudaCheck(hipEventRecord(stopEvent, 0), "cuda event record stop event");
cudaCheck(hipEventSynchronize(stopEvent), "cuda event sync stop event");
float ms;
cudaCheck(hipEventElapsedTime(&ms, startEvent, stopEvent), "get elapsed time");
PRINTF("Elapsed Time : %.3f\n", ms / 1000.0);
}
|
f1389861aa3dd2b3994c983684895e6ffc7aa2f9.cu
|
#include "include/common.h"
#include "include/job_base.h"
#include <bits/floatn.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <include/linked_list.h>
#include <include/machine_base.h>
#include <include/chromosome_base.h>
#include <tests/include/test_chromosome_base.h>
#include <tests/include/test_machine_base.h>
#include <tests/include/def.h>
size_t memory_usage = 0;
cudaError_t test_cudaMalloc(void ** ptr, size_t size){
memory_usage += size;
return cudaMalloc(ptr, size);
}
#define cudaMalloc test_cudaMalloc
extern int JOB_AMOUNT;
extern int MACHINE_AMOUNT;
extern int CHROMOSOME_AMOUNT;
extern int GENERATIONS;
class TestChromosomeBaseDevice : public testing::Test{
public:
Machine ** machines;
Machine ** address_machines_arr;
job_t ** jobs;
job_t ** address_jobs_arr;
Chromosome * chromosomes;
process_time_t **processTimes;
process_time_t **address_process_time_arr;
double *genes;
double *host_genes;
unsigned int *device_can_run_machine_size;
unsigned int *host_can_run_machine_size;
size_t gene_size;
list_operations_t *ops;
machine_base_operations_t *mbops;
job_base_operations_t *jbops;
int R_JOB_AMOUNT;
int R_MACHINE_AMOUNT;
int R_CHROMOSOME_AMOUNT;
void random_shuffle(double *genes, size_t size);
void SetUp() override;
void TearDown() override;
};
void TestChromosomeBaseDevice::random_shuffle(double *genes, size_t size){
for(unsigned int i = 0; i < size; ++i){
genes[i] = (double)rand() / (double)RAND_MAX;
}
}
void TestChromosomeBaseDevice::SetUp(){
R_JOB_AMOUNT = JOB_AMOUNT * (CHROMOSOME_AMOUNT<<1);
R_MACHINE_AMOUNT = MACHINE_AMOUNT * (CHROMOSOME_AMOUNT<<1);
R_CHROMOSOME_AMOUNT = CHROMOSOME_AMOUNT << 1;
// allocating jobs
cudaCheck(cudaMalloc((void**)&jobs, sizeof(job_t*) * R_CHROMOSOME_AMOUNT), "allocating jobs...");
cudaCheck(cudaMallocHost((void**)&address_jobs_arr, sizeof(job_t*) * R_CHROMOSOME_AMOUNT), "allocating host address_job_arr");
job_t * tmp;
for(int i = 0; i < R_CHROMOSOME_AMOUNT; ++i){
cudaCheck(cudaMalloc((void**)&tmp, sizeof(job_t) * JOB_AMOUNT), "allocating jobs for a chromosome");
address_jobs_arr[i] = tmp;
}
cudaCheck(cudaMemcpy(jobs, address_jobs_arr, sizeof(job_t*) * R_CHROMOSOME_AMOUNT, cudaMemcpyHostToDevice), "copy jobs from host to device");
// allocating machines
cudaCheck( cudaMalloc((void**)&machines, sizeof(Machine*)*R_CHROMOSOME_AMOUNT), "allocating machines...");
cudaCheck( cudaMallocHost((void**)&address_machines_arr, sizeof(Machine*)*R_CHROMOSOME_AMOUNT), "allocating host address_machines_arr");
Machine *machines_tmp;
for(int i = 0; i < R_CHROMOSOME_AMOUNT; ++i){
cudaCheck( cudaMalloc((void**)&machines_tmp, sizeof(Machine)*MACHINE_AMOUNT), "allocating machines for a chromosome");
address_machines_arr[i] = machines_tmp;
}
cudaCheck( cudaMemcpy(machines, address_machines_arr, sizeof(Machine*)*R_CHROMOSOME_AMOUNT, cudaMemcpyHostToDevice), "copy machines from host to device");
// allocating chromosomes
cudaCheck( cudaMalloc((void**)&chromosomes, sizeof(Chromosome)*R_CHROMOSOME_AMOUNT), "allocating chromosomes");
// prepare host_can_run_machine_size
cudaCheck( cudaMallocHost((void**)&host_can_run_machine_size, sizeof(unsigned int)*JOB_AMOUNT), "allocating host_can_run_machine_size on host");
cudaCheck( cudaMalloc((void**)&device_can_run_machine_size, sizeof(unsigned int)*JOB_AMOUNT), "allocating device_can_run_machines_size on device");
for(int i = 0; i < JOB_AMOUNT; ++i){
host_can_run_machine_size[i] = rand() % 200 + 400;
}
cudaCheck(cudaMemcpy(device_can_run_machine_size, host_can_run_machine_size, sizeof(unsigned int)*JOB_AMOUNT, cudaMemcpyHostToDevice), "copy can run tool");
// prepare process_time
cudaCheck( cudaMallocHost((void**)&address_process_time_arr, sizeof(process_time_t *)*JOB_AMOUNT), "allocating process time on host");
cudaCheck( cudaMalloc((void**)&processTimes, sizeof(process_time_t *)*JOB_AMOUNT), "allocating process time on device");
process_time_t *process_time_tmp_host;
process_time_t *process_time_tmp;
for(int i = 0; i < JOB_AMOUNT; ++i){
cudaCheck(cudaMalloc((void**)&process_time_tmp, sizeof(process_time_t) * host_can_run_machine_size[i]), "allocating process time on device");
cudaCheck(cudaMallocHost((void**)&process_time_tmp_host, sizeof(process_time_t) * host_can_run_machine_size[i]), "allocating process time on host");
for(unsigned int j = 0; j < host_can_run_machine_size[i]; ++j){
process_time_tmp_host[j].machine_no = rand() % MACHINE_AMOUNT;
process_time_tmp_host[j].process_time = rand() % 1000;
}
cudaCheck(cudaMemcpy(process_time_tmp, process_time_tmp_host, sizeof(process_time_t) * host_can_run_machine_size[i], cudaMemcpyHostToDevice), "copy process time from host to device");
cudaCheck(cudaFreeHost(process_time_tmp_host), "cuda free process_time_tmp_host");
address_process_time_arr[i] = process_time_tmp;
}
cudaCheck( cudaMemcpy(processTimes, address_process_time_arr, sizeof(process_time_t *)*JOB_AMOUNT, cudaMemcpyHostToDevice), "copy can run tool from host to device");
// alloc genes
cudaCheck(cudaMalloc((void**)&genes, sizeof(double)*(JOB_AMOUNT<<1)*(CHROMOSOME_AMOUNT<<1)),"cuda alloc genes");
cudaCheck(cudaMallocHost((void**)&host_genes, sizeof(double)*(JOB_AMOUNT<<1)*(CHROMOSOME_AMOUNT<<1)),"cuda malloc host_genes");
// alloc ops
cudaCheck(cudaMalloc((void**)&ops, sizeof(list_operations_t)), "alloc ops");
cudaCheck(cudaMalloc((void**)&mbops, sizeof(machine_base_operations_t)), "alloc mbops");
cudaCheck(cudaMalloc((void**)&jbops, sizeof(job_base_operations_t)), "alloc jbops");
}
void TestChromosomeBaseDevice::TearDown(){
// free ops
cudaCheck(cudaFree(ops), "Free ops...");
cudaCheck(cudaFree(mbops), "Free mbops");
cudaCheck(cudaFree(jbops), "Free jbops");
// free jobs
cudaCheck(cudaFree(jobs), "Free jobs");
for(int i = 0; i < R_CHROMOSOME_AMOUNT; ++i){
cudaCheck(cudaFree(address_jobs_arr[i]), "Free an array of jobs");
}
cudaCheck( cudaFreeHost(address_jobs_arr), "Free address_job_arr");
// free machines
cudaCheck(cudaFree(machines), "Free machines");
for(int i = 0; i < R_CHROMOSOME_AMOUNT; ++i){
cudaCheck(cudaFree(address_machines_arr[i]), "Free an array of machines");
}
cudaCheck(cudaFreeHost(address_machines_arr), "Free address_machines_arr");
// free can_run_machine
cudaCheck(cudaFree(device_can_run_machine_size), "Free device_can_run_machine_size");
cudaCheck(cudaFreeHost(host_can_run_machine_size), "Free host_can_run_machine_size");
// free chromosomes
cudaCheck(cudaFree(chromosomes), "Free chromosomes");
// free process time
cudaCheck(cudaFree(processTimes), "Free processTimes");
for(int i = 0; i < JOB_AMOUNT; ++i){
cudaCheck(cudaFree(address_process_time_arr[i]), "Free an array of process_time");
}
cudaCheck(cudaFreeHost(address_process_time_arr), "Free address_process_time_arr");
}
__global__ void machineSetup(Machine **machines, int MACHINE_AMOUNT, int CHROMOSOME_AMOUNT){
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if(x < CHROMOSOME_AMOUNT && y < MACHINE_AMOUNT){
// machines[x][y].base.init = initMachineBase;
initMachine(&machines[x][y]);
}
}
__global__ void chromosomeSetup(Chromosome *chromosomes, double * genes, int JOB_AMOUNT, int CHROMOSOME_AMOUNT){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < CHROMOSOME_AMOUNT){
chromosomes[idx].val = idx;
chromosomes[idx].base.gene_size = JOB_AMOUNT<<1;
chromosomes[idx].base.chromosome_no = idx;
initChromosomeBase(&chromosomes[idx].base, genes + idx*(JOB_AMOUNT<<1));
}
}
__global__ void jobSetup(job_t ** jobs, unsigned int *can_run_tool_size, process_time_t ** process_times, job_base_operations_t *ops, int JOB_AMOUNT, int CHROMOSOME_AMOUNT){
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if(x < CHROMOSOME_AMOUNT && y < JOB_AMOUNT){
initJob(&jobs[x][y]);
jobs[x][y].base.job_no = y;
ops->setProcessTime(&jobs[x][y].base, process_times[y], can_run_tool_size[y]);
// jobs[x][y].base.setProcessTime(&jobs[x][y].base, process_times[y], can_run_tool_size[y]);
}
}
__global__ void jobBindGenes(job_t **jobs, Chromosome * chromosomes, job_base_operations_t *jbops, int JOB_AMOUNT, int R_CHROMOSOME_AMOUNT){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x < R_CHROMOSOME_AMOUNT && y < JOB_AMOUNT){
jbops->setMsGenePointer(&jobs[x][y].base, chromosomes[x].base.ms_genes + y);
jbops->setOsSeqGenePointer(&jobs[x][y].base, chromosomes[x].base.os_genes + y);
}
}
__global__ void machineSelection(job_t **jobs, job_base_operations_t *jbops, int JOB_AMOUNT, int R_CHROMOSOME_AMOUNT){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int machine_idx;
if(x < R_CHROMOSOME_AMOUNT && y < JOB_AMOUNT){
machine_idx = jbops->machineSelection(&jobs[x][y].base);
jobs[x][y].base.machine_no = jobs[x][y].base.process_time[machine_idx].machine_no;
}
}
__global__ void machineSelection2(job_t **jobs, Machine **machines, machine_base_operations_t *ops, int JOB_AMOUNT, int MACHINE_AMOUNT, int R_CHROMOSOME_AMOUNT){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < R_CHROMOSOME_AMOUNT && y < MACHINE_AMOUNT){
for(int i = 0; i < JOB_AMOUNT; ++i){
if(jobs[x][i].base.machine_no == y){
ops->addJob(&machines[x][y].base, &jobs[x][i].ele);
// machines[x][y].base.addJob(&machines[x][y].base, &jobs[x][i]);
}
}
}
}
__global__ void sortJob(Machine **machines, list_operations_t *ops, int MACHINE_AMOUNT, int R_CHROMOSOME_AMOUNT){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x == 0 && y == 0){
__sortJob(&machines[x][y].base, ops);
}
}
__global__ void operationSetup(list_operations_t *ops,job_base_operations_t *jbops, machine_base_operations_t *mbops){
ops->init = initList;
ops->setNext = __listEleSetNext;
ops->setPrev = __listEleSetPrev;
machine_base_operations_t mbtmp = MACHINE_BASE_OPS;
*mbops = mbtmp;
job_base_operations_t jbtmp = JOB_BASE_OPS;
*jbops = jbtmp;
}
TEST_F(TestChromosomeBaseDevice, test_chromosome_base_device){
// setup grid dimension
dim3 machine_chromosome_thread(2, 512);
dim3 machine_chromosome_block(R_CHROMOSOME_AMOUNT >> 1, MACHINE_AMOUNT >> 8);
dim3 job_chromosome_thread(2, 512);
dim3 job_chromosome_block(R_CHROMOSOME_AMOUNT >> 1, JOB_AMOUNT >> 8); // (R_CHROMOSOME_AMOUNT / 32, )
// setup kernel
operationSetup<<<1, 1>>>(ops, jbops, mbops);
jobSetup<<<job_chromosome_block, job_chromosome_thread>>>(jobs, device_can_run_machine_size, processTimes, jbops, JOB_AMOUNT, R_CHROMOSOME_AMOUNT);
machineSetup<<<machine_chromosome_block, machine_chromosome_thread>>>(machines, MACHINE_AMOUNT, R_CHROMOSOME_AMOUNT);
chromosomeSetup<<<100, 100>>>(chromosomes, genes, JOB_AMOUNT, R_CHROMOSOME_AMOUNT);
jobBindGenes<<<job_chromosome_block, job_chromosome_thread>>>(jobs, chromosomes, jbops, JOB_AMOUNT, R_CHROMOSOME_AMOUNT);
cudaDeviceSynchronize();
PRINTF("Device Memory Usage = %lu\n", memory_usage);
cudaEvent_t startEvent, stopEvent;
cudaCheck(cudaEventCreate(&startEvent), "create start event");
cudaCheck(cudaEventCreate(&stopEvent), "create stop event");
// start computing...
PRINTF("Start Computing...\n");
cudaCheck(cudaEventRecord(startEvent, 0), "cuda event record start event");
machineSelection<<<job_chromosome_block, job_chromosome_thread>>>(jobs, jbops, JOB_AMOUNT, R_CHROMOSOME_AMOUNT); // machine selection
PRINTF("Finish machine selection part 1\n");
PRINTF("Start machine selection part2\n");
machineSelection2<<<machine_chromosome_block, machine_chromosome_thread>>>(jobs, machines, mbops, JOB_AMOUNT, MACHINE_AMOUNT, R_CHROMOSOME_AMOUNT);
PRINTF("Finish machine selection part2\n");
sortJob<<<machine_chromosome_block, machine_chromosome_thread>>>(machines, ops, MACHINE_AMOUNT, R_CHROMOSOME_AMOUNT);
cudaDeviceSynchronize();
PRINTF("Finish sorting\n");
cudaCheck(cudaEventRecord(stopEvent, 0), "cuda event record stop event");
cudaCheck(cudaEventSynchronize(stopEvent), "cuda event sync stop event");
float ms;
cudaCheck(cudaEventElapsedTime(&ms, startEvent, stopEvent), "get elapsed time");
PRINTF("Elapsed Time : %.3f\n", ms / 1000.0);
}
|
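A standalone sketch (not from the test above) of the cudaEvent timing pattern it uses: record a start event, launch the work, record and synchronize a stop event, then read the elapsed milliseconds with cudaEventElapsedTime. The dummy kernel and sizes are assumptions made for illustration.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void busyKernel(float *data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = data[i] * 2.0f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    cudaEvent_t startEvent, stopEvent;
    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);

    cudaEventRecord(startEvent, 0);               // mark the start on the default stream
    busyKernel<<<(n + 255) / 256, 256>>>(d, n);
    cudaEventRecord(stopEvent, 0);                // mark the end
    cudaEventSynchronize(stopEvent);              // wait until the stop event has completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, startEvent, stopEvent);
    printf("Elapsed Time : %.3f\n", ms / 1000.0f);

    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);
    cudaFree(d);
    return 0;
}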
ba10f1cd5cc68bd9434f85939b722e4a7f1f3990.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
|
ba10f1cd5cc68bd9434f85939b722e4a7f1f3990.cu
|
#include "includes.h"
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
|
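A minimal usage sketch for the kDivideScalar kernel above (host code not part of the original file), showing why the grid-stride loop matters: each thread advances by blockDim.x * gridDim.x, so a fixed launch of 64 blocks of 256 threads still covers a buffer far larger than the thread count. The sizes and launch shape are illustrative assumptions.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < len; i += numThreads) {
        dest[i] = mat[i] / alpha;
    }
}

int main() {
    const unsigned int len = 1 << 22;                    // far more elements than launched threads
    std::vector<float> h(len, 10.0f);
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, len * sizeof(float));
    cudaMalloc(&d_out, len * sizeof(float));
    cudaMemcpy(d_in, h.data(), len * sizeof(float), cudaMemcpyHostToDevice);
    kDivideScalar<<<64, 256>>>(d_in, 2.0f, d_out, len);  // 16384 threads stride over 4M elements
    cudaMemcpy(h.data(), d_out, len * sizeof(float), cudaMemcpyDeviceToHost);
    printf("h[0] = %f, h[len-1] = %f\n", h[0], h[len - 1]);  // both 5.0
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}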
87963c1deced5e9083c5f370866388ffa43a73ec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "histogram_cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *histogram = NULL;
hipMalloc(&histogram, XSIZE*YSIZE);
float *values = NULL;
hipMalloc(&values, XSIZE*YSIZE);
size_t nb = 1;
float bin_size = XSIZE*YSIZE;
float min = 1;
int bins = 1;
int nb_thread = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(histogram_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, histogram, values, nb, bin_size, min, bins, nb_thread);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(histogram_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, histogram, values, nb, bin_size, min, bins, nb_thread);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(histogram_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, histogram, values, nb, bin_size, min, bins, nb_thread);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
87963c1deced5e9083c5f370866388ffa43a73ec.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "histogram_cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *histogram = NULL;
cudaMalloc(&histogram, XSIZE*YSIZE);
float *values = NULL;
cudaMalloc(&values, XSIZE*YSIZE);
size_t nb = 1;
float bin_size = XSIZE*YSIZE;
float min = 1;
int bins = 1;
int nb_thread = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
histogram_cuda<<<gridBlock,threadBlock>>>(histogram,values,nb,bin_size,min,bins,nb_thread);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
histogram_cuda<<<gridBlock,threadBlock>>>(histogram,values,nb,bin_size,min,bins,nb_thread);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
histogram_cuda<<<gridBlock,threadBlock>>>(histogram,values,nb,bin_size,min,bins,nb_thread);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
51e8a1e90b8fa6cecb6e85bbbc3ce2302d6e8321.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <basicOps.cuh>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <float.h>
const int NUM_THREADS = 32;
__global__ void kGetNonZeroElements(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
atomicAdd(&out[0],A[i] != 0.0f ? 1.0f : 0.0f);
}
__global__ void kGetNonZeroColumns(float *A, float *out, int rows, int cols)
{
const int myCol = (blockIdx.x * blockDim.x) + threadIdx.x;
float result = 0.0f;
if(myCol < cols)
{
for (unsigned int i = 0;i < rows; i++)
{
if(A[(myCol*rows) + i] != 0.0f)
result = 1.0f;
}
atomicAdd(&out[0],result);
}
}
__global__ void kRenormalizeWeights(float *w, float *unit_sums, float limit, int rows, int cols)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int size = rows*cols;
int myCol = 0;
float rel_diff = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
myCol = i/rows;
if(unit_sums[myCol] > limit)
{
rel_diff = 1.0f/unit_sums[myCol];
w[i] *= rel_diff;
}
else{ continue; }
}
}
__global__ void kFill_with(float *m, float fill_value, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
m[i] = fill_value;
}
__global__ void kFill_with(int *m, int fill_value, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
m[i] = fill_value;
}
__global__ void kRdmNumbers(float *seed, int size, float *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned long long s[ 2 ];
//s[0] = (long long)seed[(gridDim.x*blockIdx.x) + threadIdx.x];
//s[1] = (long long)seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x];
s[0] = 17;
s[1] = 83;
unsigned long long s1 = s[ 0 ];
unsigned long long s0 = s[ 1 ];
unsigned long long rdm64 = 23459867034598355;
if(idx == 0)
{
printf("rdm: %i\n", rdm64);
printf("rdm1: %i\n", (unsigned int)(rdm64&0xffffffff));
printf("rdm2: %i\n", (unsigned int)((rdm64>>32)&0xffffffff));
}
unsigned int rdm32_1 = 0;
unsigned int rdm32_2 = 0;
//printf("seed 1: %i\n", seed[(gridDim.x*blockIdx.x) + threadIdx.x]);
//printf("seed 2: %i\n", seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x]);
//printf("idx: %i\n", idx);
for(int i = idx*2; i < size; i+=numThreads*2)
{
s1 = s[0];
s0 = s[1];
s[0] = s0;
s1 ^= s1 << 23; // a
rdm64 = (s[1 ] = (s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26))) + s0; // b, c
rdm32_1 = (rdm64&0xffffffff);
rdm32_2 = ((rdm64>>32)&0xffffffff);
out[i] = rdm32_1;
out[i+1] = rdm32_2;
}
seed[(gridDim.x*blockIdx.x) + threadIdx.x] = s[0];
seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x] = s[1];
}
__global__ void kCreateRdmSqrtWeight_Logistic(float *A, int in, int out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const float lower_limit = -4.0f*sqrtf(6.0f/((float)in + out));
const float upper_limit = 4.0f*sqrtf(6.0f/((float)in + out));
const float range = upper_limit-lower_limit;
for (unsigned int i = idx;i < size; i += numThreads)
{
A[i] = lower_limit + (A[i]*range);
}
}
__global__ void kCreateSparseRdmWeight(float *rdm, float* indicies, float *out, int rows, int cols, int connections)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int connection_idx = 0;
float rdm_value = 0.0f;
int size = connections*cols;
int current_col = 0;
//each thread fills one row
for (unsigned int i = idx; i < size; i += numThreads)
{
connection_idx = (int)indicies[i];
rdm_value = rdm[i];
current_col = i/(connections);
out[(current_col*rows)+connection_idx] = rdm_value;
}
}
__global__ void kRandInt(float *A, int lower_limit, int upper_limit, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int range = upper_limit-lower_limit + 1;
for (unsigned int i = idx;i < size; i += numThreads)
{
//use uniform random sample to get integers
A[i] = (float)(((int)((A[i]*range))) + lower_limit);
}
}
//vertical stack for column major format
__global__ void vStack(float *A, float *B, float *out, int size_out, int rows_a, int rows, int cols)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int current_col = 0;
int current_row = 0;
int offset = 0;
const int rows_b = rows - rows_a;
for (unsigned int i = idx;i < size_out; i += numThreads)
{
current_col = i / rows; //int arithmetic
offset = (current_col*rows);
current_row = i - offset;
if(current_row >= rows_a)
{
//fetch b value
out[i] = B[(current_col*rows_b) + current_row - rows_a];
}
else
{
//fetch a value
out[i] = A[(current_col*rows_a) + current_row];
}
}
}
__global__ void hStack(float *A, float *B, float *out, int size_out, int size_a)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for(unsigned int i = idx; i < size_out; i+=numThreads)
{
if(i >= size_a)
{
//append B
out[i] = B[i - size_a];
}
else
{
//append A
out[i] = A[i];
}
}
}
__global__ void hStackN(float **arrA, int general_size, float *out, int size_out, int matrices_count)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int current_matrix = 0;
for(unsigned int i = idx; i < size_out; i+=numThreads)
{
current_matrix = i / general_size;
current_matrix = current_matrix == matrices_count ? current_matrix - 1 : current_matrix;
out[i] = arrA[current_matrix][i - (current_matrix*general_size)];
}
}
__global__ void vStackN(float **arrA, float *out, int rows, int cols)
{
int size = rows*cols;
int offset = rows*cols*blockIdx.x;
for(unsigned int i = threadIdx.x; i < size; i+=blockDim.x)
out[offset + i] = arrA[blockIdx.x][i];
}
__global__ void AddGradientsN(float **arrA, int size, int myrank, int matrix_count, float multiplier)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for(int matrix_idx = 0; matrix_idx < matrix_count; matrix_idx++)
{
if(matrix_idx == myrank){ continue; }
for(unsigned int i = idx; i < size; i+=numThreads)
arrA[myrank][i] += arrA[matrix_idx][i];
}
//better numerical stability to do it afterwards
for(unsigned int i = idx; i < size; i+=numThreads)
arrA[myrank][i] *=multiplier;
}
__global__ void hStackN(Matrix **arrA, int general_size, float *out, int size_out, int matrices_count)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int current_matrix = 0;
for(unsigned int i = idx; i < size_out; i+=numThreads)
{
current_matrix = i / general_size;
current_matrix = current_matrix == matrices_count ? current_matrix - 1 : current_matrix;
out[i] = arrA[current_matrix]->data[i - (current_matrix*general_size)];
}
}
__global__ void kAdd_to_z(float *z, float *z1, float *y, float *y_count, int rows, int cols, float *out)
{
float value = 0;
for(int row = blockIdx.x; row < rows; row +=gridDim.x)
{
int cls = (int)y[row];
if(threadIdx.x == 0)
atomicAdd(&y_count[cls],1.0f);
for (unsigned int col = threadIdx.x; col < cols; col += blockDim.x)
{
value = z1[row + (col*rows)];
atomicAdd(&out[cls+(col*rows)],value);
}
}
__syncthreads();
for(int row = blockIdx.x; row < rows; row +=gridDim.x)
{
int cls = (int)y[row];
for (unsigned int col = threadIdx.x; col < cols; col += blockDim.x)
{
if(y_count[cls] > 0)
out[cls+(col*rows)] /= y_count[cls];
}
}
}
__global__ void kAdd(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] + B[i];
}
__global__ void kMul(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] * B[i];
}
__global__ void kSub(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] - B[i];
}
__global__ void kSub_Sparse(float *A, float *data, int *ptr_rows, int *idx_cols, float *out, int rows, int cols, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int row_idx = 0;
for (unsigned int i = idx;i < rows*cols; i += numThreads)
out[i] = A[i];
for (unsigned int i = idx;i < size; i += numThreads)
{
for(int j = 0; j < rows + 1; j++)
{
if(ptr_rows[j] > i)
{
row_idx = j-1;
break;
}
}
out[(idx_cols[i] * rows) + row_idx] = A[(idx_cols[i] * rows) + row_idx] - data[i];
}
}
__global__ void kDiv(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = fdividef(A[i],B[i]);
}
__global__ void kExp(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = expf(A[i]);
}
__global__ void kLogistic(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = 1.0f / (1.0 + expf(-A[i]));
}
__global__ void kLogisticGrad(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i]*(1 - A[i]);
}
__global__ void kSqrt(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = sqrtf(A[i]);
}
__global__ void kLog(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = logf(A[i]);
}
__global__ void kSquare(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = powf(A[i], 2.0f);
}
__global__ void kAbs(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = fabsf(A[i]);
}
__global__ void kScalarMul(float *A, float scalar, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = scalar*A[i];
}
__global__ void kScalarAdd(float *A, float scalar, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i]+scalar;
}
__global__ void kTranspose(float *A, float *out, int width, int height)
{
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
// read the Matrix *tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = A[index_in];
}
__syncthreads();
// write the transposed Matrix *tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
out[index_out] = block[threadIdx.x][threadIdx.y];
}
}
//for column major data
__global__ void slice_rows(float *A, float *out, int size_out, int rows_A, int start, int end)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int current_col = 0;
int current_row = 0;
int offset = 0;
int rows_out = (end - start) + 1;
for (unsigned int i = idx;i < size_out; i += numThreads)
{
current_col = i / rows_out; //note: int arithmetic
current_row = i - (current_col*rows_out);
offset = rows_A*current_col;
out[i] = A[offset + start + current_row];
}
}
//for column major data
__global__ void slice_cols(float *A, float *out, int start, int rows, int size_out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx; i < size_out; i += numThreads)
{
out[i] = A[i+(start*rows)];
}
}
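// block-wide max reduction over sdata in shared memory; each thread contributes one value
// and the warp-synchronous tail relies on the volatile alias declared below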
__device__ void reduceToMax(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); }
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
}
__device__ void reduceToMaxAndArgMax(float* sdataMax, float* sdataArgMax, unsigned int tid, int threads)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdataMax[tid];
if(threads == 32)
{
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smemMax = sdataMax;
volatile float* smemArgMax = sdataArgMax;
if (NUM_THREADS >= 32) if(mySum < smemMax[tid + 16]){smemMax[tid] = mySum = smemMax[tid + 16]; smemArgMax[tid] = smemArgMax[tid + 16]; }
if (NUM_THREADS >= 16) if(mySum < smemMax[tid + 8]){smemMax[tid] = mySum = smemMax[tid + 8]; smemArgMax[tid] = smemArgMax[tid + 8]; }
if (NUM_THREADS >= 8) if(mySum < smemMax[tid + 4]){smemMax[tid] = mySum = smemMax[tid + 4]; smemArgMax[tid] = smemArgMax[tid + 4]; }
if (NUM_THREADS >= 4) if(mySum < smemMax[tid + 2]){smemMax[tid] = mySum = smemMax[tid + 2]; smemArgMax[tid] = smemArgMax[tid + 2]; }
if (NUM_THREADS >= 2) if(mySum < smemMax[tid + 1]){smemMax[tid] = mySum = smemMax[tid + 1]; smemArgMax[tid] = smemArgMax[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smemMax = sdataMax;
volatile float* smemArgMax = sdataArgMax;
if (NUM_THREADS >= 64) if(mySum < smemMax[tid + 32]){smemMax[tid] = mySum = smemMax[tid + 32]; smemArgMax[tid] = smemArgMax[tid + 32]; }
if (NUM_THREADS >= 32) if(mySum < smemMax[tid + 16]){smemMax[tid] = mySum = smemMax[tid + 16]; smemArgMax[tid] = smemArgMax[tid + 16]; }
if (NUM_THREADS >= 16) if(mySum < smemMax[tid + 8]){smemMax[tid] = mySum = smemMax[tid + 8]; smemArgMax[tid] = smemArgMax[tid + 8]; }
if (NUM_THREADS >= 8) if(mySum < smemMax[tid + 4]){smemMax[tid] = mySum = smemMax[tid + 4]; smemArgMax[tid] = smemArgMax[tid + 4]; }
if (NUM_THREADS >= 4) if(mySum < smemMax[tid + 2]){smemMax[tid] = mySum = smemMax[tid + 2]; smemArgMax[tid] = smemArgMax[tid + 2]; }
if (NUM_THREADS >= 2) if(mySum < smemMax[tid + 1]){smemMax[tid] = mySum = smemMax[tid + 1]; smemArgMax[tid] = smemArgMax[tid + 1]; }
}
}
}
__device__ void reduceToSumLocal(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
}
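// row-wise softmax for column-major data: each thread processes whole rows and keeps its
// running max and exponent sum in a private slot of the shared scratch arrays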
__global__ void kSoftMax(float* A, float* out, unsigned int rows, unsigned int cols)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float col_value = 0.0f;
__shared__ float max_values[THREADS_PER_BLOCKS];
__shared__ float row_sums[THREADS_PER_BLOCKS];
for (unsigned int row = idx; row < rows; row += numThreads)
{
//fill with min values
max_values[threadIdx.x] = -FLT_MAX;
row_sums[threadIdx.x] = 0.0f;
//calc max value of the row; the shared scratch is sized per block, so index it with threadIdx.x
for (unsigned int i = 0; i < cols; i++)
{
col_value = A[(i*rows) + row];
if(col_value > max_values[threadIdx.x])
{
max_values[threadIdx.x] = col_value;
}
}
//calc the row sum
for (unsigned int i = 0; i < cols; i++)
{
row_sums[threadIdx.x] += __expf(A[(i*rows) + row] - max_values[threadIdx.x]);
}
//calc the value of each element in the row
for (unsigned int i = 0; i < cols; i++)
{
out[(i*rows) + row] = __expf(A[(i*rows) + row] - max_values[threadIdx.x])/row_sums[threadIdx.x];
}
}
}
//for column major data
__global__ void kSubMatrixVector(float *A, float *v, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//offset = current_column * rows
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i / rows)*rows; //note: int arithmetic
out[i] = A[i] - v[i - offset];
}
}
//for column major data
__global__ void kAddMatrixVector(float *A, float *v, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//offset = current_column * rows
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i / rows); //note: int arithmetic
out[i] = A[i] + v[offset];
}
}
//for column major data
__global__ void kAddScaledMatrixVector(float *A, float *v, float weight, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//offset = current_column * rows
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i / rows); //note: int arithmetic
out[i] = A[i] + (v[offset]*weight);
}
}
//for column major data
__global__ void kMulMatrixVector(float *A, float *v, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//offset = current_column * rows
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i / rows); //note: int arithmetic
out[i] = A[i] * v[offset];
}
}
__global__ void kArgmax(float* A, float* out, unsigned int rows, unsigned int cols)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float max_value = -FLT_MAX;
float max_i = 0;
float col_value = 0.0f;
for (unsigned int row = idx; row < rows; row += numThreads)
{
for (unsigned int i = 0; i < cols; i++)
{
col_value = A[(i*rows) + row];
if(col_value > max_value)
{
max_value = col_value;
max_i = i;
}
}
out[row] = max_i;
}
}
__global__ void kCreate_t_matrix(float *labels, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int label = 0;
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
label = (int)(labels[i]);
//offset = (label*rows) gives the current column; i gives the current row
offset = (label*rows) + i;
out[offset] = 1.0f;
}
}
__global__ void kEqual(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
{
out[i] = (float)(A[i] == B[i]);
}
}
__global__ void kRectifiedLinear(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] > 0.0f ? A[i] : 0.0f;
}
__global__ void kRectifiedLinear_Derivative(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] > 0.0f ? 1.0f : 0.0f;
}
__global__ void kDoubleRectifiedLinear(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
value = (A[i] > 0.0f) ? A[i] : 0.0f;
out[i] = (value < 1.0f) ? value : 1.0f;
}
}
__global__ void kLinear(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i];
}
__global__ void kDoubleRectifiedLinear_Derivative(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
{
out[i] = (A[i] <= 0.0f) || (A[i] >=1.0f) ? 0.0f : 1.0f;
}
}
__global__ void kHardTanH(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
// clamp A[i] to the [-1, 1] range
value = (A[i] < 1.0f) ? A[i] : 1.0f;
out[i] = (value > -1.0f) ? value : -1.0f;
}
}
__global__ void kPairwise_ranking(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
value = 1.0f - A[i] + B[i];
out[i] = value < 0.0f ? 0.0f : value;
}
}
__global__ void kPairwise_ranking_derivative(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = (1.0f - A[i] + B[i]) > 0.0f ? 1.0f : 0.0f;
}
__global__ void kHardTanH_Derivative(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = (A[i] < -1.0f) || (A[i] >1.0f) ? 0.0f : 1.0f;
}
__global__ void kSquaredError(float *A, float *t, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = powf(A[i] -t[i],2.0f);
}
__global__ void kSum(float *v, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
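// every thread writes 0.0f here before accumulating; with a multi-block grid this can race
// with atomicAdd calls from blocks that have already started summing, so out[0] should also
// be zeroed on the host before launch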
out[0] = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
atomicAdd(&out[0],v[i]);
}
}
__global__ void kArange(float *out, int start, int rows, int cols, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i % rows)*cols;
out[i] = (float)(offset + (i/rows) + start);
}
}
__global__ void kDropout(float *A, float *rdm, float dropout, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
rdm[i] = rdm[i] > dropout ? A[i] : 0.0f;
}
__global__ void kDropout_cached(float *A, float *dropout, float *out, int current_idx, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = ((blockIdx.x * blockDim.x) + threadIdx.x);
int shifted_idx = 0;
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
shifted_idx = i +current_idx;
offset = shifted_idx/10000;
out[i] = dropout[shifted_idx - (offset*10000)] == 1.0f ? A[i] : 0.0f;
}
}
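// RMSprop update: scales the gradient by 1/batch_size, maintains the running average of
// squared gradients in RMS, and overwrites grad with the final update step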
__global__ void kRMSprop(float *RMS, float *grad, float RMS_multiplier, float learning_rate, int batch_size, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad[i] = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
RMS[i] = RMS_value;
}
}
__global__ void kRMSprop_with_momentum_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
float momentum_matrix_value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
momentum_matrix_value = m[i];
momentum_matrix_value -= grad_value;
RMS[i] = RMS_value;
m[i] = momentum_matrix_value;
}
}
__global__ void kLocalGrad (float *z, float *w, float *y, float *m, float learning_rate, int batch_size, int size, float momentum)
{
}
__global__ void kRMSprop_with_momentum_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
float momentum_matrix_value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
momentum_matrix_value = m[i] = (momentum*momentum_matrix_value) - grad_value;
RMS[i] = RMS_value;
w[i] += momentum_matrix_value;
}
}
__global__ void kRMSprop_with_nesterov_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
m[i] = (momentum*m[i]) - (learning_rate*grad_value);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
RMS[i] = RMS_value;
w[i] -= grad_value;
/*
grad_value = learning_rate*fdividef(grad[i],(float)batch_size);
m[i] = (momentum*m[i]) - grad_value;
w[i] -= grad_value;
*/
}
}
__global__ void kNesterov_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = learning_rate*fdividef(grad[i],(float)batch_size);
m[i] = (momentum*m[i]) - grad_value;
w[i] -= grad_value;
}
}
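// round-trip test for the 8-bit quantization: quantizes each value of A against the
// 126-entry level table and immediately writes the dequantized float back to out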
__global__ void kCompression_8bit_test(float *tbl, float *A, float precision, int size, float *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float absnumber = 0.0;
float multiplier = 0.1f/precision;
float threshold = precision/1.e6f;
__shared__ float tbl_values[128];
if(threadIdx.x < 126)
tbl_values[threadIdx.x] = tbl[threadIdx.x];
__syncthreads();
for (int i = idx;i < size; i += numThreads)
{
int isNegative = 0;
int pivot = 63;
int upper_pivot = 125;
int lower_pivot = 0;
absnumber = A[i]*multiplier;
if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; }
if(absnumber < threshold){ out[i] = 0.0f; continue; }
for(int j = 32; j > 0; j>>=1)
{
if(absnumber > tbl_values[pivot])
{
lower_pivot = pivot;
pivot+=j;
}
else
{
upper_pivot = pivot;
pivot-=j;
}
}
if(lower_pivot == pivot)
if(fabsf(tbl_values[pivot]-absnumber) < (tbl_values[upper_pivot]-absnumber))
out[i] = tbl_values[pivot]/(isNegative == 1 ? -multiplier : multiplier);
else
out[i] = tbl_values[upper_pivot]/(isNegative == 1 ? -multiplier : multiplier);
else
if((tbl_values[pivot]-absnumber) < fabsf(tbl_values[lower_pivot]-absnumber))
out[i] = tbl_values[pivot]/(isNegative == 1 ? -multiplier : multiplier);
else
out[i] = tbl_values[lower_pivot]/(isNegative == 1 ? -multiplier : multiplier);
}
}
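// 8-bit decode: the low 7 bits index a shared table of positive levels scaled by
// precision, bit 7 selects the negated copy, and codes 126/127/254/255 are set explicitly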
__global__ void kDecompression_8bit(float *flt_tbl, unsigned char *A, float precision, int size, float *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ float tbl_floats[256];
if(threadIdx.x < 126)
{
tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x]*precision;
tbl_floats[threadIdx.x+128] = -tbl_floats[threadIdx.x];
}
tbl_floats[126] = precision;
tbl_floats[254] = precision;//0.0f;
tbl_floats[127] = precision;
tbl_floats[255] = -precision;
__syncthreads();
for (int i = idx;i < size; i += numThreads)
{
out[i] = tbl_floats[A[i]];
}
}
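// 8-bit encode: a binary search over the 126-entry level table picks the nearest
// quantization level, bit 7 stores the sign, 254 marks underflow and 127/255 mark overflow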
__global__ void kCompression_8bit(float *flt_tbl, float *A, float precision, int size, unsigned char *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float absnumber = 0.0f;
float threshold_lower = 0.0000015;
float threshold_upper = 0.995703;
int isNegative = 0;
int pivot = 63;
int upper_pivot = 125;
int lower_pivot = 0;
__shared__ float tbl_floats[128];
if(threadIdx.x < 126)
tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x];
__syncthreads();
for (int i = idx;i < size; i += numThreads)
{
isNegative = 0;
pivot = 63;
upper_pivot = 125;
lower_pivot = 0;
absnumber = A[i]/precision;
if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; }
if(absnumber < threshold_lower){ out[i] = (unsigned char)254; continue; }
if(absnumber > threshold_upper){ out[i] = (isNegative == 0 ? (unsigned char)127 : (unsigned char)255); continue; }
for(int j = 32; j > 0; j>>=1)
{
if(absnumber > tbl_floats[pivot])
{
lower_pivot = pivot;
pivot+=j;
}
else
{
upper_pivot = pivot;
pivot-=j;
}
}
if(lower_pivot == pivot)
if(fabsf(tbl_floats[pivot]-absnumber) < (tbl_floats[upper_pivot]-absnumber))
if(isNegative == 1)
out[i] = pivot | 1 << 7;
else
out[i] = pivot;
else
if(isNegative == 1)
out[i] = upper_pivot | 1 << 7;
else
out[i] = upper_pivot;
else
if((tbl_floats[pivot]-absnumber) < fabsf(tbl_floats[lower_pivot]-absnumber))
if(isNegative == 1)
out[i] = (pivot | 1 << 7);
else
out[i] = pivot;
else
if(isNegative == 1)
out[i] = lower_pivot | 1 << 7;
else
out[i] = lower_pivot;
}
}
__global__ void kCompression_8bit_float(float *flt_tbl, float *A, float precision, int size, float *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float absnumber = 0.0f;
float threshold_lower = 0.0000015;
float threshold_upper = 0.995703;
int isNegative = 0;
int pivot = 63;
int upper_pivot = 125;
int lower_pivot = 0;
__shared__ float tbl_floats[128];
if(threadIdx.x < 126)
tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x];
__syncthreads();
for (int i = idx;i < size; i += numThreads)
{
isNegative = 0;
pivot = 63;
upper_pivot = 125;
lower_pivot = 0;
absnumber = A[i]/precision;
if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; }
if(absnumber < threshold_lower){ out[i] = (unsigned char)126; continue; }
if(absnumber > threshold_upper){ out[i] = (isNegative == 0 ? (unsigned char)127 : (unsigned char)255); continue; }
for(int j = 32; j > 0; j>>=1)
{
if(absnumber > tbl_floats[pivot])
{
lower_pivot = pivot;
pivot+=j;
}
else
{
upper_pivot = pivot;
pivot-=j;
}
}
if(lower_pivot == pivot)
if(fabsf(tbl_floats[pivot]-absnumber) < (tbl_floats[upper_pivot]-absnumber))
if(isNegative == 1)
out[i] = pivot | 1 << 7;
else
out[i] = pivot;
else
if(isNegative == 1)
out[i] = upper_pivot | 1 << 7;
else
out[i] = upper_pivot;
else
if((tbl_floats[pivot]-absnumber) < fabsf(tbl_floats[lower_pivot]-absnumber))
if(isNegative == 1)
out[i] = (pivot | 1 << 7);
else
out[i] = pivot;
else
if(isNegative == 1)
out[i] = lower_pivot | 1 << 7;
else
out[i] = lower_pivot;
}
}
__global__ void kRMSprop_with_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
for (unsigned int i = idx;i < size; i += numThreads)
{
//grad_value = fdividef(grad[i],(float)batch_size) ;
grad_value = grad[i];
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
RMS[i] = RMS_value;
w[i] -= grad_value;
}
}
__global__ void kRMSprop_with_weight_update_8bit(float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad[i] = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
RMS[i] = RMS_value;
}
}
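// sparse (CSR) x dense product, target = alpha * A_sparse * B_dense + beta * target,
// with the bounds/overflow printf checks left in for debugging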
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha)
{
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n)
{
/*
for(int i = 0; i < indptr[m+1];i++)
if(indices[i] > 23)
{
printf("ERROR: \n");
printf("%i \n", indices[i]);
printf("col: %i \n", col);
printf("row: %i \n", row);
}
*/
int max_idx = indptr[m+1];
for(int i = 0; i < m+1;i++)
if(indptr[i] > max_idx)
{
printf("ERROR: \n");
printf("%i \n", indptr[i]);
printf("max_idx: %i \n", max_idx);
}
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++)
{
/*
for(int a = start; a < end;a++)
if(indices[a] > 23)
{
printf("ERROR: \n");
printf("%i \n", indices[a]);
printf("a: %i \n", a);
}
*/
sum += data[i] * dense_data[(col * k) + indices[i]];
if(sum > 500000 || sum < -500000)
{
printf("start: %i ", start);
printf("end: %i ", end);
printf("i: %i ", i);
printf("k: %i ", k);
printf("col: %i ", col);
printf("data idx %i ", indices[i]);
printf("full idx %i ", (col * k) + indices[i]);
printf("data sparse %f ", data[i]);
printf("data dense %f ", dense_data[col * k + indices[i]]);
printf("data point %f ", data[i] * dense_data[col * k + indices[i]]);
printf(" sum %f\n", sum);
return;
}
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
__global__ void kPrintData(float *A, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
__syncthreads();
if(idx == 0)
printf("[");
for (unsigned int i = idx;i < size; i += numThreads)
printf("%f ",A[i]);
__syncthreads();
if(idx == 0)
printf("]\n");
}
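// maxout activation: for every row, takes the max and argmax over each group of
// maxout_level consecutive columns; the shared reductions assume blockDim.x == 32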
__global__ void kMaxout(float *A, float *out, float *outargmax, int maxout_level, unsigned int cols, unsigned int rows)
{
__shared__ float max_values[32];
__shared__ float argmax_values[32];
float const min_value = -FLT_MAX;
for(int row = blockIdx.x; row < rows; row +=blockDim.x)
{
int softout_block_idx = row + (blockIdx.y*maxout_level*rows);
if(threadIdx.x < maxout_level)
{
max_values[threadIdx.x] = A[softout_block_idx+(threadIdx.x*rows)];
argmax_values[threadIdx.x] = (float)((blockIdx.y*maxout_level)+threadIdx.x);
}
else
{
max_values[threadIdx.x] = min_value;
argmax_values[threadIdx.x] = -1.0f;
}
//reduceToMax(max_values, threadIdx.x);
reduceToMaxAndArgMax(max_values, argmax_values, threadIdx.x, 32);
__syncthreads();
if(threadIdx.x == 0) out[row + (blockIdx.y*rows)] = max_values[0];
if(threadIdx.x == 1) outargmax[row + (blockIdx.y*rows)] = argmax_values[0];
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height)
{
extern __shared__ float max_vals[];
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
reduceToMax(max_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = max_vals[0];
}
}
__global__ void kExpandToMaxoutGrad(float* error, float* indexes, float *out, int error_size, int error_rows, int maxout_level)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int grad_size = maxout_level*error_size;
for (unsigned int i = idx;i < grad_size; i += numThreads)
out[i] = 0.0f;
for (unsigned int i = idx;i < error_size; i += numThreads)
{
int row_idx = idx - ((idx / error_rows)*error_rows);
out[row_idx + (((int)indexes[idx])*error_rows)] = error[i];
}
}
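// builds training batches from vocabulary indices: copies each window word's embedding
// vector into batch_X and batch_Y, replacing the middle word in batch_Y with a randomly
// drawn word from rdm_idx (a corrupted example)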
__global__ void kConstructVocabMatrix(float *vocab_idx, float *vocab_idx_y, float* vocab, float *rdm_idx, float *batch_X, float *batch_Y)
{
int middleIdx = (gridDim.y/2);
int myIdx = 0;
int myRdmIdx = 0;
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
//middle index is replaced by rdm word for batch_Y, but we still need to write the correct word into batch_X!
if(blockIdx.y != middleIdx)
{
myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
vocab_idx_y[blockIdx.x+(blockIdx.y*gridDim.x)] = (float)myIdx;
}
else
{
myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
myRdmIdx = (int)rdm_idx[blockIdx.x];
vocab_idx_y[blockIdx.x+(blockIdx.y*gridDim.x)] = (float)myRdmIdx;
}
int myVocabIdx = blockDim.x*myIdx;
int myVocabRdmIdx = blockDim.x*myRdmIdx;
if(blockIdx.y != middleIdx)
{
batch_X[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x];
batch_Y[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x];
}
else
{
batch_X[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x];
batch_Y[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabRdmIdx + threadIdx.x];
}
}
__global__ void concat_batches(float **batch_X, float **batch_Y, float *out_X, float *out_Y)
{
//gridDim.z = matrix_count
//gridDim.y = batch size
//gridDim.x = window_size
//blockDim.x = partial vocab size
int full_vocab_size = gridDim.z*blockDim.x;
int cols = gridDim.x*full_vocab_size;
int partial_cols = blockDim.x*gridDim.x;
//full_size times current row = current row idx
//current window position times partial_threads times current matrix = current word idx
//threadIdx.x current parameter within a word
out_X[(blockIdx.y *cols) + (blockIdx.x*full_vocab_size) + (blockIdx.z*blockDim.x) +threadIdx.x] = batch_X[blockIdx.z][(blockIdx.y *partial_cols) + (blockIdx.x*blockDim.x) + threadIdx.x];
out_Y[(blockIdx.y *cols) + (blockIdx.x*full_vocab_size) + (blockIdx.z*blockDim.x) +threadIdx.x] = batch_Y[blockIdx.z][(blockIdx.y *partial_cols) + (blockIdx.x*blockDim.x) + threadIdx.x];
}
/*
//numerically unstable?
__global__ void kUpdateVocabWithGradient(float *grad, float *vocab_idx, float* vocab, float learning_rate)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
int myIdx = 0;
float multiplier = -fdividef(learning_rate,float(gridDim.x));
myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
//printf("%f ",grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier);
//printf("%f ",vocab[myVocabIdx + threadIdx.x]);
//printf("%f ",vocab[myVocabIdx + threadIdx.x]+ (grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier));
if(myIdx > 10000)
atomicAdd(&vocab[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier);
//vocab[myVocabIdx + threadIdx.x] +=grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)];
//printf("%s ",!isfinite(grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier));
}
*/
//numerically unstable?
__global__ void kUpdateVocabWithGradient(float *grad, float *vocab_idx, float* vocab, float learning_rate)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
atomicAdd(&vocab[myVocabIdx + threadIdx.x],-grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*learning_rate);
}
__global__ void kExpandDoubleVocabGradient(float *gradX, float *gradY, float *vocab_idx_X, float *vocab_idx_Y, float* vocab,
float *vocab_grad, float *vocab_grad_idx, float learning_rate, int grad_size)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
//float multiplier = fdividef(learning_rate,(float)(gridDim.x*2));
int myIdx_X = (int)vocab_idx_X[blockIdx.x+(blockIdx.y*gridDim.x)];
int myIdx_Y = (int)vocab_idx_Y[blockIdx.x+(blockIdx.y*gridDim.x)];
//int grad_cols = grad_size/blockDim.x;
int myVocabIdx_X = blockDim.x*myIdx_X;
int myVocabIdx_Y = blockDim.x*myIdx_Y;
atomicAdd(&vocab_grad[myVocabIdx_X + threadIdx.x],gradX[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]);
atomicAdd(&vocab_grad[myVocabIdx_Y + threadIdx.x],gradY[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]);
/*
vocab_grad_idx[myIdx_X] = 1.0f;
vocab_grad_idx[myIdx_Y] = 1.0f;
__syncthreads();
int block_idx = (blockIdx.y*gridDim.x) + blockIdx.x;
int threads_blocks = gridDim.x*gridDim.y;
for(int i = block_idx; i < grad_cols; i+=threads_blocks)
{
if(vocab_grad_idx[i] == 1.0f)
{
vocab[(i*blockDim.x) + threadIdx.x] -= vocab_grad[(i*blockDim.x) + threadIdx.x]*multiplier;
}
}
*/
}
/*
__global__ void kExpandVocabGradient_sharedMemory(float *grad, float *vocab_idx, float *vocab_grad, float *sorted_vocab_idx, vocab_idx_size)
{
//vocab_vector_size = blockDim.x;
//batch_size = gridDim.x
//try different configs for gridDim.x, e.g 16, 32 etc.
//will have vocab_vector_size = blockDim.x elements e.g. 64
extern __shared__ float sGrads[];
float myWordIdx = 0.0f;
float last_word = 0.0f;
float currentIdx = 0.0f;
sGrads[threadIdx.x] = 0.0f;
for(int word = blockIdx.x; currentIdx < vocab_idx_size; word++)
{
for(int i = currentIdx; i < vocab_idx_size; i++, currentIdx++)
{
}
}
}
*/
__global__ void kExpandVocabGradient(float *grad, float *vocab_idx, float *vocab_grad)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]);
}
__global__ void kExpandPartialVocabGradient(float *grad, float *vocab_idx, float *vocab_grad, int matrix_idx, int matrix_count)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
int offset = matrix_idx*gridDim.x*blockDim.x;
int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*(blockDim.x*matrix_count)*gridDim.x) + (threadIdx.x*gridDim.x) + offset]);
}
__global__ void kExpandVocabGradientMiddleWord(float *grad, float *vocab_idx, float *vocab_grad)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
if(blockIdx.x+(blockIdx.y*gridDim.x) == gridDim.y/2)
{
int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]);
}
}
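// naive 8-bit matrix multiply: both operands are decoded on the fly through per-precision
// shared lookup tables and accumulated into the float output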
__global__ void kDot8bit(unsigned char *A, unsigned char *B, float *out, int rowsA, int colsA, int colsB, float *flt_tbl, float precisionA, float precisionB)
{
const unsigned int threads_per_block = blockDim.x*blockDim.y;
const int mygrid = blockIdx.x;
const int myidx = (threadIdx.y*blockDim.x)+threadIdx.x;
__shared__ float tbl_floatsA[256];
__shared__ float tbl_floatsB[256];
for(int i = myidx; i < 126; i++)
{
tbl_floatsA[i] = flt_tbl[i]*precisionA;
tbl_floatsA[i+128] = -tbl_floatsA[i];
tbl_floatsB[i] = flt_tbl[i]*precisionB;
tbl_floatsB[i+128] = -tbl_floatsB[i];
}
tbl_floatsA[126] = 0.0f;
tbl_floatsB[126] = 0.0f;
tbl_floatsA[127] = precisionA;
tbl_floatsB[127] = -precisionA;
tbl_floatsA[254] = -0.0f;
tbl_floatsB[254] = -0.0f;
tbl_floatsA[255] = precisionB;
tbl_floatsB[255] = -precisionB;
__syncthreads();
for(int Arow = mygrid; Arow < rowsA; Arow+=gridDim.x)
{
for(int Bcol = myidx; Bcol < colsB; Bcol+=threads_per_block)
{
int idxout = (Bcol*rowsA) + Arow;
for(int Acol = 0; Acol < colsA; Acol++)
out[idxout] += tbl_floatsA[A[(Acol*rowsA)+Arow]] * tbl_floatsB[B[(colsA*Bcol) + Acol]];
}
}
}
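// shared-memory tiled variant of the 8-bit multiply: tiles of A and a transposed tile of B
// are staged in shared memory (B transposed to avoid bank conflicts) and partial products
// are accumulated into out with atomicAdd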
__global__ void kDot8bit_shared(unsigned char *A, unsigned char *B, float *out, int rowsA, int colsA, int colsB, float *flt_tbl, float precisionA, float precisionB)
{
int myidx = (threadIdx.y*blockDim.x)+threadIdx.x;
__shared__ unsigned char A_tile[64][256]; //64x32 banks
__shared__ unsigned char B_tile[64][256];//256x8 banks
__shared__ float tbl_floatsA[256];
__shared__ float tbl_floatsB[256];
for(int i = myidx; i < 126; i++)
{
tbl_floatsA[i] = flt_tbl[i]*precisionA;
tbl_floatsA[i+128] = -tbl_floatsA[i];
tbl_floatsB[i] = flt_tbl[i]*precisionB;
tbl_floatsB[i+128] = -tbl_floatsB[i];
}
tbl_floatsA[126] = 0.0f;
tbl_floatsB[126] = 0.0f;
tbl_floatsA[127] = precisionA;
tbl_floatsB[127] = -precisionA;
tbl_floatsA[254] = -0.0f;
tbl_floatsB[254] = -0.0f;
tbl_floatsA[255] = precisionB;
tbl_floatsB[255] = -precisionB;
__syncthreads();
int offset = 0;
myidx = threadIdx.y*16;
int Arow = threadIdx.x+(blockIdx.x*64);
int Acol = (threadIdx.y*16)+(blockIdx.y*256);
if(Arow < rowsA)
{
for(int i = 0; i < 16; i++){ A_tile[threadIdx.x][myidx+i] = A[((Acol+i)*rowsA)+ Arow]; }
for(int i = threadIdx.y; i < colsB; i+=blockDim.y){ out[((i)*rowsA) + Arow] = 0.0f; }
}
else
for(int i = 0; i < 16; i++){ A_tile[threadIdx.x][myidx+i] = 126; }
for(int Btile = 0 ; Btile < colsB; Btile+=64)
{
if(Btile+threadIdx.x < colsB)
{
for(int i = 0; i < 16; i++)
{
if(Acol+i < colsA)
B_tile[threadIdx.x][myidx+i] = B[((threadIdx.x + Btile)*colsA)+ Acol+i];//B_tile is transposed to avoid bank conflicts with 64 threads
else
B_tile[threadIdx.x][myidx+i] = 126;
}
}
else
{
for(int i = 0; i < 16; i++)
B_tile[threadIdx.x][myidx+i] = 126;//B_tile is transposed to avoid bank conflicts with 64 threads
}
__syncthreads();
for(int Bcol2 = offset; Bcol2 < 64 + offset; Bcol2++)
{
for (int i = 0; i < 16; ++i)
atomicAdd(&out[((Bcol2)*rowsA) + Arow],
tbl_floatsA[A_tile[threadIdx.x][myidx + i]] *
tbl_floatsB[B_tile[Bcol2-offset][myidx + i]]);
}
offset +=64;
}
}
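// tiled dense matrix multiply for row-major matrices: stages TILE_DIM x TILE_DIM blocks of
// A and B in shared memory and zero-pads tiles that run past the matrix edges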
__global__ void MatMul(float* A, float* B, float* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols)
{
float CValue = 0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows) As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else As[threadIdx.y][threadIdx.x] = 0.0;
if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols) Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else Bs[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
__syncthreads();
}
if (Row < CRows && Col < CCols) C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols)+(blockIdx.x*blockDim.x)+threadIdx.x]=CValue;
}
static __device__ void saxpy(float alpha, const float* b, float* c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
__global__ void sgemm_kernel_N_N_64_16_16_16_4(float* C,const float* A,const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta )
{
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx+idt >= m )
A += ibx+0;
else
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
B += tx+__mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory accesses
in dimension N.
Simply it's the stopping criterion.
or you can say that access index wraps around to a valid memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1=0;
const float *Bend = B + k - k % 16;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by setting
s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimension M, N that doesnt fit into blocks
*/
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
	if ( ibx >= 0 )
		lda = 0;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
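//Illustrative launch sketch (added, not part of the original file): the kernel
//above computes a 64x16 tile of C per 16x4 thread block, and its boundary code
//handles m, n and k that are not multiples of the tile sizes, so a plausible
//configuration is the following.
static void launch_sgemm_N_N(float *C, const float *A, const float *B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta)
{
	dim3 block(16, 4);
	dim3 grid((m + 63)/64, (n + 15)/16);
	sgemm_kernel_N_N_64_16_16_16_4<<<grid, block>>>(C, A, B, m, n, k, lda, ldb, ldc, alpha, beta);
}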
__global__ void sgemmNN( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int id = inx + iny*16;
A += ibx + id;
B += inx + __mul24( iby + iny, ldb );
C += ibx + id + __mul24( iby, ldc );
const float *Blast = B + k;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__shared__ float bs[16][17];
do
{
#pragma unroll
for( int i = 0; i < 16; i += 4 )
bs[inx][iny+i] = B[i*ldb];
__syncthreads();
#pragma unroll
for( int i = 0; i < 16; i++, A += lda )
saxpy( A[0], &bs[i][0], c );
B += 16;
__syncthreads();
} while( B < Blast );
for( int i = 0; i < 16; i++, C += ldc )
C[0] = alpha*c[i] + beta*C[0];
}
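//Illustrative launch sketch (added for clarity): unlike the kernel above it,
//sgemmNN performs no boundary checks, so this assumes m is a multiple of 64 and
//both n and k are multiples of 16.
static void launch_sgemmNN(const float *A, int lda, const float *B, int ldb, float *C, int ldc, int m, int n, int k, float alpha, float beta)
{
	dim3 block(16, 4);
	dim3 grid(m/64, n/16);
	sgemmNN<<<grid, block>>>(A, lda, B, ldb, C, ldc, k, alpha, beta);
}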
__global__ void sgemm_kernel_N_T_64_16_4_16_4(float* C, const float* A, const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
if ( iby + tx >= n )
B += iby + 0;
else
B += iby + tx;
/*
Taking care of boundary cases where K < 4.
*/
if ( ty >= k )
B += __mul24( 0, ldb );
else
B += __mul24( ty, ldb );
if ( ibx + idt >= m )
A += ibx + 0;
else
A += ibx + idt;
int s2=lda, s3=2*lda, s4=3*lda;
switch (k) {
case 1: s2=0; s3=0; s4=0; break;
case 2: s2=lda; s3=0; s4=0; break;
case 3: s2=lda; s3=2*lda; s4=0; break;
}
C += ibx + idt + __mul24( iby, ldc );
float Ap[4] = { A[0], A[s2], A[s3], A[s4] };
float b = B[0];
const float *Bend = B + ldb*(k - k % 4);
B += 4*ldb;
A += 4*lda;
__shared__ float Bb[4][16];
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 7 ) {
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[s2];
Ap[2] = A[s3];
Ap[3] = A[s4];
b=B[0];
saxpy( Ab[0], &Bb[0][0], Cb );
saxpy( Ab[1], &Bb[1][0], Cb );
saxpy( Ab[2], &Bb[2][0], Cb );
saxpy( Ab[3], &Bb[3][0], Cb );
A += 4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
}
if ( k > 3 ) {
Bb[ty][tx]=b;
int k1 = k - k % 4;
if ( (k1+ty) >= k )
B -= 4*ldb;
else
B -= 0*ldb;
if ( (k1+0) >= k ) {s2=0; s3=0*lda; s4=0; A -= 4*lda; } else
if ( (k1+1) >= k ) {s2=0; s3=0*lda; s4=0; A -= 0*lda; } else
if ( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda; } else
if ( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda; }
__syncthreads();
b=B[0];
saxpy( Ap[0], &Bb[0][0], Cb ); Ap[0] = A[0];
saxpy( Ap[1], &Bb[1][0], Cb ); Ap[1] = A[s2];
saxpy( Ap[2], &Bb[2][0], Cb ); Ap[2] = A[s3];
saxpy( Ap[3], &Bb[3][0], Cb ); Ap[3] = A[s4];
}
k = k % 4;
if ( k != 0 ) {
__syncthreads();
Bb[ty][tx]=b;
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( Ap[i], &Bb[i][0], Cb );
}
}
if ( (iby+16)>=n) {
lda = n-iby;
}
else{
lda = 16;
}
	if ( (ibx+idt) >= m )
		lda = 0;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
__global__ void sgemm_kernel_T_N_32_32_8_8_8(float* C, const float* A, const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta )
{
const int ibx = blockIdx.x * 32;
const int iby = blockIdx.y * 32;
const int tx = threadIdx.y;
const int ty = threadIdx.x;
int idt = tx*8 + ty;
if ( ty >= k )
A += __mul24(ibx, lda) + 0;
else
A += __mul24(ibx, lda) + ty;
if ( (ibx + tx) >= m )
A += __mul24(0, lda);
else
A += __mul24(tx, lda);
if ( (iby+tx) >= n )
B += __mul24(iby+0, ldb);
else
B += __mul24(iby+tx, ldb);
if ( ty >= k )
B += 0;
else
B += ty;
C += ibx + idt % 32 + __mul24( iby + 16*(idt/32), ldc );
lda = lda * 8;
ldb = ldb * 8;
int as1=0, as2=lda, as3=2*lda, as4=3*lda;
int bs1=0, bs2=ldb, bs3=2*ldb, bs4=3*ldb;
switch(k) {
case 1: as2=0; as3=0*lda; as4=0; bs2=0; bs3=0*ldb; bs4=0; break;
case 2: as2=lda; as3=0*lda; as4=0; bs2=ldb; bs3=0*ldb; bs4=0; break;
case 3: as2=lda; as3=2*lda; as4=0; bs2=ldb; bs3=2*ldb; bs4=0; break;
}
if ( (ibx + tx ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 8 ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 16) >= m ) { as1=0; as2=1*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 24) >= m ) { as1=0; as2=1*lda; as3=2*lda; as4=0*lda; }
if ( (iby + tx ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 8 ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 16) >= n ) { bs1=0; bs2=1*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 24) >= n ) { bs1=0; bs2=1*ldb; bs3=2*ldb; bs4=0*ldb; }
float b = B[bs1];
float b1 = B[bs2];
float b2 = B[bs3];
float b3 = B[bs4];
float Ap[4] = { A[as1], A[as2], A[as3], A[as4] };
const float *Bend = B + (k - k % 8);
B += 8;
A += 8;
__shared__ float Bb[8][33];
__shared__ float ABb[32][9];
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const int l = 17*(idt/32);
int idt1 = idt;
idt = idt % 32;
if ( k > 15 ) {
do {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
saxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1];
saxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2];
saxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3];
saxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4];
saxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1];
saxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2];
saxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3];
saxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4];
B += 8;
A += 8;
__syncthreads();
} while (B < Bend);
}
if ( k > 7 ) {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
as1 = k - k % 8;
if ( as1+ty >= k ) { bs1=0*ldb; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; B -= 8; }
if ( as1+ty >= k ) { as1=0*lda; as2=0*lda; as3=0*lda; as4=0*lda; A -= 8; }
as1=0;
saxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1];
saxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2];
saxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3];
saxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4];
saxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1];
saxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2];
saxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3];
saxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4];
}
k = k % 8;
if ( k != 0 ) {
__syncthreads();
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( ABb[idt][i], &Bb[i][l], Cb );
}
}
if ( (iby+16*(idt1/32+1)) >= n ) {
lda = n - iby - 16*(idt1/32);
}
else {
lda = 16;
}
	if ( (ibx+idt) >= m )
		lda = 0;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
|
51e8a1e90b8fa6cecb6e85bbbc3ce2302d6e8321.cu
|
#include <basicOps.cuh>
#include <curand.h>
#include <curand_kernel.h>
#include <float.h>
const int NUM_THREADS = 32;
__global__ void kGetNonZeroElements(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
atomicAdd(&out[0],A[i] != 0.0f ? 1.0f : 0.0f);
}
__global__ void kGetNonZeroColumns(float *A, float *out, int rows, int cols)
{
const int myCol = (blockIdx.x * blockDim.x) + threadIdx.x;
float result = 0.0f;
if(myCol < cols)
{
for (unsigned int i = 0;i < rows; i++)
{
if(A[(myCol*rows) + i] != 0.0f)
result = 1.0f;
}
atomicAdd(&out[0],result);
}
}
__global__ void kRenormalizeWeights(float *w, float *unit_sums, float limit, int rows, int cols)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int size = rows*cols;
int myCol = 0;
float rel_diff = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
myCol = i/rows;
if(unit_sums[myCol] > limit)
{
rel_diff = 1.0f/unit_sums[myCol];
w[i] *= rel_diff;
}
else{ continue; }
}
}
__global__ void kFill_with(float *m, float fill_value, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
m[i] = fill_value;
}
__global__ void kFill_with(int *m, int fill_value, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
m[i] = fill_value;
}
__global__ void kRdmNumbers(float *seed, int size, float *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned long long s[ 2 ];
//s[0] = (long long)seed[(gridDim.x*blockIdx.x) + threadIdx.x];
//s[1] = (long long)seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x];
s[0] = 17;
s[1] = 83;
unsigned long long s1 = s[ 0 ];
unsigned long long s0 = s[ 1 ];
unsigned long long rdm64 = 23459867034598355;
if(idx == 0)
{
printf("rdm: %i\n", rdm64);
printf("rdm1: %i\n", (unsigned int)(rdm64&0xffffffff));
printf("rdm2: %i\n", (unsigned int)((rdm64>>32)&0xffffffff));
}
unsigned int rdm32_1 = 0;
unsigned int rdm32_2 = 0;
//printf("seed 1: %i\n", seed[(gridDim.x*blockIdx.x) + threadIdx.x]);
//printf("seed 2: %i\n", seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x]);
//printf("idx: %i\n", idx);
for(int i = idx*2; i < size; i+=numThreads*2)
{
s1 = s[0];
s0 = s[1];
s[0] = s0;
s1 ^= s1 << 23; // a
rdm64 = (s[1 ] = (s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26))) + s0; // b, c
rdm32_1 = (rdm64&0xffffffff);
rdm32_2 = ((rdm64>>32)&0xffffffff);
out[i] = rdm32_1;
out[i+1] = rdm32_2;
}
seed[(gridDim.x*blockIdx.x) + threadIdx.x] = s[0];
seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x] = s[1];
}
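//Reference host-side version of the xorshift128+ step used in kRdmNumbers,
//added as an illustrative sketch for checking the kernel's output: one step
//mixes the two 64-bit state words and returns their sum, which the kernel then
//splits into two 32-bit outputs.
static unsigned long long xorshift128plus_step(unsigned long long s[2])
{
	unsigned long long s1 = s[0];
	const unsigned long long s0 = s[1];
	s[0] = s0;
	s1 ^= s1 << 23;                             // a
	s[1] = s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26);   // b, c
	return s[1] + s0;
}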
__global__ void kCreateRdmSqrtWeight_Logistic(float *A, int in, int out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const float lower_limit = -4.0f*sqrtf(6.0f/((float)in + out));
const float upper_limit = 4.0f*sqrtf(6.0f/((float)in + out));
const float range = upper_limit-lower_limit;
for (unsigned int i = idx;i < size; i += numThreads)
{
A[i] = lower_limit + (A[i]*range);
}
}
__global__ void kCreateSparseRdmWeight(float *rdm, float* indicies, float *out, int rows, int cols, int connections)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int connection_idx = 0;
float rdm_value = 0.0f;
int size = connections*cols;
int current_col = 0;
//each thread fills one row
for (unsigned int i = idx; i < size; i += numThreads)
{
connection_idx = (int)indicies[i];
rdm_value = rdm[i];
current_col = i/(connections);
out[(current_col*rows)+connection_idx] = rdm_value;
}
}
__global__ void kRandInt(float *A, int lower_limit, int upper_limit, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int range = upper_limit-lower_limit + 1;
for (unsigned int i = idx;i < size; i += numThreads)
{
//use uniform random sample to get integers
A[i] = (float)(((int)((A[i]*range))) + lower_limit);
}
}
//vertical stack for column major format
__global__ void vStack(float *A, float *B, float *out, int size_out, int rows_a, int rows, int cols)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int current_col = 0;
int current_row = 0;
int offset = 0;
const int rows_b = rows - rows_a;
for (unsigned int i = idx;i < size_out; i += numThreads)
{
current_col = i / rows; //int arithmetic
offset = (current_col*rows);
current_row = i - offset;
if(current_row >= rows_a)
{
//fetch b value
out[i] = B[(current_col*rows_b) + current_row - rows_a];
}
else
{
//fetch a value
out[i] = A[(current_col*rows_a) + current_row];
}
}
}
__global__ void hStack(float *A, float *B, float *out, int size_out, int size_a)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for(unsigned int i = idx; i < size_out; i+=numThreads)
{
if(i >= size_a)
{
//append B
out[i] = B[i - size_a];
}
else
{
//append A
out[i] = A[i];
}
}
}
__global__ void hStackN(float **arrA, int general_size, float *out, int size_out, int matrices_count)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int current_matrix = 0;
for(unsigned int i = idx; i < size_out; i+=numThreads)
{
current_matrix = i / general_size;
current_matrix = current_matrix == matrices_count ? current_matrix - 1 : current_matrix;
out[i] = arrA[current_matrix][i - (current_matrix*general_size)];
}
}
__global__ void vStackN(float **arrA, float *out, int rows, int cols)
{
int size = rows*cols;
int offset = rows*cols*blockIdx.x;
for(unsigned int i = threadIdx.x; i < size; i+=blockDim.x)
out[offset + i] = arrA[blockIdx.x][i];
}
__global__ void AddGradientsN(float **arrA, int size, int myrank, int matrix_count, float multiplier)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for(int matrix_idx = 0; matrix_idx < matrix_count; matrix_idx++)
{
if(matrix_idx == myrank){ continue; }
for(unsigned int i = idx; i < size; i+=numThreads)
arrA[myrank][i] += arrA[matrix_idx][i];
}
//better numerical stability to do it afterwards
for(unsigned int i = idx; i < size; i+=numThreads)
arrA[myrank][i] *=multiplier;
}
__global__ void hStackN(Matrix **arrA, int general_size, float *out, int size_out, int matrices_count)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int current_matrix = 0;
for(unsigned int i = idx; i < size_out; i+=numThreads)
{
current_matrix = i / general_size;
current_matrix = current_matrix == matrices_count ? current_matrix - 1 : current_matrix;
out[i] = arrA[current_matrix]->data[i - (current_matrix*general_size)];
}
}
__global__ void kAdd_to_z(float *z, float *z1, float *y, float *y_count, int rows, int cols, float *out)
{
float value = 0;
for(int row = blockIdx.x; row < rows; row +=gridDim.x)
{
int cls = (int)y[row];
if(threadIdx.x == 0)
atomicAdd(&y_count[cls],1.0f);
for (unsigned int col = threadIdx.x; col < cols; col += blockDim.x)
{
value = z1[row + (col*rows)];
atomicAdd(&out[cls+(col*rows)],value);
}
}
__syncthreads();
for(int row = blockIdx.x; row < rows; row +=gridDim.x)
{
int cls = (int)y[row];
for (unsigned int col = threadIdx.x; col < cols; col += blockDim.x)
{
if(y_count[cls] > 0)
out[cls+(col*rows)] /= y_count[cls];
}
}
}
__global__ void kAdd(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] + B[i];
}
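//Illustrative launch sketch (added, not part of the original file): kAdd and the
//other grid-stride element-wise kernels in this file accept any launch
//configuration; a simple choice is blocks of NUM_THREADS threads with a capped
//grid, the stride loop covering whatever remains.
static void launch_kAdd(float *d_A, float *d_B, float *d_out, int size)
{
	int blocks = (size + NUM_THREADS - 1)/NUM_THREADS;
	if(blocks > 4096){ blocks = 4096; } //cap the grid; the stride loop inside the kernel handles the rest
	kAdd<<<blocks, NUM_THREADS>>>(d_A, d_B, d_out, size);
}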
__global__ void kMul(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] * B[i];
}
__global__ void kSub(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] - B[i];
}
__global__ void kSub_Sparse(float *A, float *data, int *ptr_rows, int *idx_cols, float *out, int rows, int cols, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int row_idx = 0;
for (unsigned int i = idx;i < rows*cols; i += numThreads)
out[i] = A[i];
for (unsigned int i = idx;i < size; i += numThreads)
{
for(int j = 0; j < rows + 1; j++)
{
if(ptr_rows[j] > i)
{
row_idx = j-1;
break;
}
}
out[(idx_cols[i] * rows) + row_idx] = A[(idx_cols[i] * rows) + row_idx] - data[i];
}
}
__global__ void kDiv(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = fdividef(A[i],B[i]);
}
__global__ void kExp(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = expf(A[i]);
}
__global__ void kLogistic(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
		out[i] = 1.0f / (1.0f + expf(-A[i])); //use a float literal to avoid a double-precision division on the device
}
__global__ void kLogisticGrad(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i]*(1 - A[i]);
}
__global__ void kSqrt(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = sqrtf(A[i]);
}
__global__ void kLog(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = logf(A[i]);
}
__global__ void kSquare(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = powf(A[i], 2.0f);
}
__global__ void kAbs(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = fabsf(A[i]);
}
__global__ void kScalarMul(float *A, float scalar, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = scalar*A[i];
}
__global__ void kScalarAdd(float *A, float scalar, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i]+scalar;
}
__global__ void kTranspose(float *A, float *out, int width, int height)
{
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
// read the Matrix *tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = A[index_in];
}
__syncthreads();
// write the transposed Matrix *tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
out[index_out] = block[threadIdx.x][threadIdx.y];
}
}
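//Illustrative launch sketch (added): kTranspose expects one
//COPY_BLOCK_SIZE x COPY_BLOCK_SIZE thread block per tile of A, with width and
//height referring to the row-major layout indexed above.
static void launch_kTranspose(float *d_A, float *d_out, int width, int height)
{
	dim3 block(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE);
	dim3 grid((width + COPY_BLOCK_SIZE - 1)/COPY_BLOCK_SIZE, (height + COPY_BLOCK_SIZE - 1)/COPY_BLOCK_SIZE);
	kTranspose<<<grid, block>>>(d_A, d_out, width, height);
}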
//for column major data
__global__ void slice_rows(float *A, float *out, int size_out, int rows_A, int start, int end)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int current_col = 0;
int current_row = 0;
int offset = 0;
int rows_out = (end - start) + 1;
for (unsigned int i = idx;i < size_out; i += numThreads)
{
current_col = i / rows_out; //note: int arithmetic
current_row = i - (current_col*rows_out);
offset = rows_A*current_col;
out[i] = A[offset + start + current_row];
}
}
//for column major data
__global__ void slice_cols(float *A, float *out, int start, int rows, int size_out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx; i < size_out; i += numThreads)
{
out[i] = A[i+(start*rows)];
}
}
__device__ void reduceToMax(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); }
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
}
__device__ void reduceToMaxAndArgMax(float* sdataMax, float* sdataArgMax, unsigned int tid, int threads)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdataMax[tid];
if(threads == 32)
{
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smemMax = sdataMax;
volatile float* smemArgMax = sdataArgMax;
if (NUM_THREADS >= 32) if(mySum < smemMax[tid + 16]){smemMax[tid] = mySum = smemMax[tid + 16]; smemArgMax[tid] = smemArgMax[tid + 16]; }
if (NUM_THREADS >= 16) if(mySum < smemMax[tid + 8]){smemMax[tid] = mySum = smemMax[tid + 8]; smemArgMax[tid] = smemArgMax[tid + 8]; }
if (NUM_THREADS >= 8) if(mySum < smemMax[tid + 4]){smemMax[tid] = mySum = smemMax[tid + 4]; smemArgMax[tid] = smemArgMax[tid + 4]; }
if (NUM_THREADS >= 4) if(mySum < smemMax[tid + 2]){smemMax[tid] = mySum = smemMax[tid + 2]; smemArgMax[tid] = smemArgMax[tid + 2]; }
if (NUM_THREADS >= 2) if(mySum < smemMax[tid + 1]){smemMax[tid] = mySum = smemMax[tid + 1]; smemArgMax[tid] = smemArgMax[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smemMax = sdataMax;
volatile float* smemArgMax = sdataArgMax;
if (NUM_THREADS >= 64) if(mySum < smemMax[tid + 32]){smemMax[tid] = mySum = smemMax[tid + 32]; smemArgMax[tid] = smemArgMax[tid + 32]; }
if (NUM_THREADS >= 32) if(mySum < smemMax[tid + 16]){smemMax[tid] = mySum = smemMax[tid + 16]; smemArgMax[tid] = smemArgMax[tid + 16]; }
if (NUM_THREADS >= 16) if(mySum < smemMax[tid + 8]){smemMax[tid] = mySum = smemMax[tid + 8]; smemArgMax[tid] = smemArgMax[tid + 8]; }
if (NUM_THREADS >= 8) if(mySum < smemMax[tid + 4]){smemMax[tid] = mySum = smemMax[tid + 4]; smemArgMax[tid] = smemArgMax[tid + 4]; }
if (NUM_THREADS >= 4) if(mySum < smemMax[tid + 2]){smemMax[tid] = mySum = smemMax[tid + 2]; smemArgMax[tid] = smemArgMax[tid + 2]; }
if (NUM_THREADS >= 2) if(mySum < smemMax[tid + 1]){smemMax[tid] = mySum = smemMax[tid + 1]; smemArgMax[tid] = smemArgMax[tid + 1]; }
}
}
}
__device__ void reduceToSumLocal(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
}
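//Illustrative sketch (added, not part of the original file) of how the
//block-level reductions above are meant to be used: each thread loads one value
//into shared memory, reduceToSumLocal folds the block, and thread 0 writes the
//block's partial sum. Assumes blockDim.x == NUM_THREADS, e.g.
//kBlockSumExample<<<(size+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(A, blockSums, size);
__global__ void kBlockSumExample(float *A, float *blockSums, int size)
{
	__shared__ float sdata[NUM_THREADS];
	const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
	sdata[threadIdx.x] = i < size ? A[i] : 0.0f;
	reduceToSumLocal(sdata, threadIdx.x);
	if(threadIdx.x == 0)
		blockSums[blockIdx.x] = sdata[0];
}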
__global__ void kSoftMax(float* A, float* out, unsigned int rows, unsigned int cols)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	float col_value = 0.0f;
	__shared__ float max_values[THREADS_PER_BLOCKS];
	__shared__ float row_sums[THREADS_PER_BLOCKS];
	const int tid = threadIdx.x; //shared buffers must be indexed per block; the global index idx overruns them when gridDim.x > 1
	for (unsigned int row = idx; row < rows; row += numThreads)
	{
		//fill with min values
		max_values[tid] = -FLT_MAX;
		row_sums[tid] = 0.0f;
		//calc max value of the row
		for (unsigned int i = 0; i < cols; i++)
		{
			col_value = A[(i*rows) + row];
			if(col_value > max_values[tid])
			{
				max_values[tid] = col_value;
			}
		}
		//calc the row sum
		for (unsigned int i = 0; i < cols; i++)
		{
			row_sums[tid] += __expf(A[(i*rows) + row] - max_values[tid]);
		}
		//calc the value of each element in the row
		for (unsigned int i = 0; i < cols; i++)
		{
			out[(i*rows) + row] = __expf(A[(i*rows) + row] - max_values[tid])/row_sums[tid];
		}
}
}
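//Illustrative launch sketch (added): kSoftMax processes one row per thread and
//its shared buffers are indexed by threadIdx.x, so blockDim.x must not exceed
//THREADS_PER_BLOCKS.
static void launch_kSoftMax(float *d_A, float *d_out, int rows, int cols)
{
	int blocks = (rows + THREADS_PER_BLOCKS - 1)/THREADS_PER_BLOCKS;
	kSoftMax<<<blocks, THREADS_PER_BLOCKS>>>(d_A, d_out, rows, cols);
}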
//for column major data
__global__ void kSubMatrixVector(float *A, float *v, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//offset = current_column * rows
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i / rows)*rows; //note: int arithmetic
out[i] = A[i] - v[i - offset];
}
}
//for column major data
__global__ void kAddMatrixVector(float *A, float *v, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//offset = current_column * rows
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i / rows); //note: int arithmetic
out[i] = A[i] + v[offset];
}
}
//for column major data
__global__ void kAddScaledMatrixVector(float *A, float *v, float weight, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//offset = current_column * rows
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i / rows); //note: int arithmetic
out[i] = A[i] + (v[offset]*weight);
}
}
//for column major data
__global__ void kMulMatrixVector(float *A, float *v, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//offset = current_column * rows
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i / rows); //note: int arithmetic
out[i] = A[i] * v[offset];
}
}
__global__ void kArgmax(float* A, float* out, unsigned int rows, unsigned int cols)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	for (unsigned int row = idx; row < rows; row += numThreads)
	{
		//track the maximum per row; these must be reset for every row this thread handles,
		//otherwise the previous row's maximum carries over
		float max_value = -FLT_MAX;
		float max_i = 0;
		float col_value = 0.0f;
for (unsigned int i = 0; i < cols; i++)
{
col_value = A[(i*rows) + row];
if(col_value > max_value)
{
max_value = col_value;
max_i = i;
}
}
out[row] = max_i;
}
}
__global__ void kCreate_t_matrix(float *labels, float *out, int rows, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int label = 0;
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
label = (int)(labels[i]);
//offset = (label*rows) gives the current column; i gives the current row
offset = (label*rows) + i;
out[offset] = 1.0f;
}
}
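//Worked example (added for clarity): with labels = {2, 0, 1} and rows = 3 the
//kernel sets out[(2*3)+0], out[(0*3)+1] and out[(1*3)+2] to 1.0f, i.e. a
//column-major 3 x num_classes one-hot matrix. out is assumed to be
//zero-initialised (e.g. via kFill_with) before the launch.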
__global__ void kEqual(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
{
out[i] = (float)(A[i] == B[i]);
}
}
__global__ void kRectifiedLinear(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] > 0.0f ? A[i] : 0.0f;
}
__global__ void kRectifiedLinear_Derivative(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i] > 0.0f ? 1.0f : 0.0f;
}
__global__ void kDoubleRectifiedLinear(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
value = (A[i] > 0.0f) ? A[i] : 0.0f;
out[i] = (value < 1.0f) ? value : 1.0f;
}
}
__global__ void kLinear(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = A[i];
}
__global__ void kDoubleRectifiedLinear_Derivative(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
{
out[i] = (A[i] <= 0.0f) || (A[i] >=1.0f) ? 0.0f : 1.0f;
}
}
__global__ void kHardTanH(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
		value = (A[i] < 1.0f) ? A[i] : 1.0f; //clamp to the upper bound
		out[i] = (value > -1.0f) ? value : -1.0f; //clamp to the lower bound
}
}
__global__ void kPairwise_ranking(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
value = 1.0f - A[i] + B[i];
out[i] = value < 0.0f ? 0.0f : value;
}
}
__global__ void kPairwise_ranking_derivative(float *A, float *B, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = (1.0f - A[i] + B[i]) > 0.0f ? 1.0f : 0.0f;
}
__global__ void kHardTanH_Derivative(float *A, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = (A[i] < -1.0f) || (A[i] >1.0f) ? 0.0f : 1.0f;
}
__global__ void kSquaredError(float *A, float *t, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
out[i] = powf(A[i] -t[i],2.0f);
}
__global__ void kSum(float *v, float *out, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	//note: out[0] is assumed to be zero-initialised by the caller; resetting it
	//here from every thread races with the atomicAdds of other threads and blocks
for (unsigned int i = idx;i < size; i += numThreads)
{
atomicAdd(&out[0],v[i]);
}
}
__global__ void kArange(float *out, int start, int rows, int cols, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
offset = (i % rows)*cols;
out[i] = (float)(offset + (i/rows) + start);
}
}
__global__ void kDropout(float *A, float *rdm, float dropout, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
for (unsigned int i = idx;i < size; i += numThreads)
rdm[i] = rdm[i] > dropout ? A[i] : 0.0f;
}
__global__ void kDropout_cached(float *A, float *dropout, float *out, int current_idx, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = ((blockIdx.x * blockDim.x) + threadIdx.x);
int shifted_idx = 0;
int offset = 0;
for (unsigned int i = idx;i < size; i += numThreads)
{
shifted_idx = i +current_idx;
offset = shifted_idx/10000;
out[i] = dropout[shifted_idx - (offset*10000)] == 1.0f ? A[i] : 0.0f;
}
}
__global__ void kRMSprop(float *RMS, float *grad, float RMS_multiplier, float learning_rate, int batch_size, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad[i] = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
RMS[i] = RMS_value;
}
}
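/*
   Illustrative host-side sketch of how kRMSprop might be launched.  The
   kernel realizes the usual RMSprop update
       RMS  = RMS_multiplier*RMS + (1 - RMS_multiplier)*(grad/batch_size)^2
       grad = learning_rate * (grad/batch_size) / (sqrt(RMS) + 1e-8)
   and overwrites grad with the scaled step.  The pointer names, block size
   and hyper-parameter values below are assumptions, not taken from the
   surrounding code.
*/
static void launch_rmsprop_sketch(float *d_RMS, float *d_grad, int size, int batch_size)
{
	const int threads = 256;                         // any grid-stride configuration works
	const int blocks  = (size + threads - 1) / threads;
	kRMSprop<<<blocks, threads>>>(d_RMS, d_grad,
	                              0.9f,              // RMS_multiplier (assumed)
	                              0.001f,            // learning_rate (assumed)
	                              batch_size, size);
}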
__global__ void kRMSprop_with_momentum_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
float momentum_matrix_value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
momentum_matrix_value = m[i];
momentum_matrix_value -= grad_value;
RMS[i] = RMS_value;
m[i] = momentum_matrix_value;
}
}
__global__ void kLocalGrad (float *z, float *w, float *y, float *m, float learning_rate, int batch_size, int size, float momentum)
{
}
__global__ void kRMSprop_with_momentum_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
float momentum_matrix_value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
momentum_matrix_value = m[i] = (momentum*momentum_matrix_value) - grad_value;
RMS[i] = RMS_value;
w[i] += momentum_matrix_value;
}
}
__global__ void kRMSprop_with_nesterov_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
m[i] = (momentum*m[i]) - (learning_rate*grad_value);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
RMS[i] = RMS_value;
w[i] -= grad_value;
/*
grad_value = learning_rate*fdividef(grad[i],(float)batch_size);
m[i] = (momentum*m[i]) - grad_value;
w[i] -= grad_value;
*/
}
}
__global__ void kNesterov_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = learning_rate*fdividef(grad[i],(float)batch_size);
m[i] = (momentum*m[i]) - grad_value;
w[i] -= grad_value;
}
}
__global__ void kCompression_8bit_test(float *tbl, float *A, float precision, int size, float *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float absnumber = 0.0;
float multiplier = 0.1f/precision;
float threshold = precision/1.e6f;
__shared__ float tbl_values[128];
if(threadIdx.x < 126)
tbl_values[threadIdx.x] = tbl[threadIdx.x];
__syncthreads();
for (int i = idx;i < size; i += numThreads)
{
int isNegative = 0;
int pivot = 63;
int upper_pivot = 125;
int lower_pivot = 0;
absnumber = A[i]*multiplier;
if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; }
if(absnumber < threshold){ out[i] = 0.0f; continue; }
for(int j = 32; j > 0; j>>=1)
{
if(absnumber > tbl_values[pivot])
{
lower_pivot = pivot;
pivot+=j;
}
else
{
upper_pivot = pivot;
pivot-=j;
}
}
if(lower_pivot == pivot)
if(fabsf(tbl_values[pivot]-absnumber) < (tbl_values[upper_pivot]-absnumber))
out[i] = tbl_values[pivot]/(isNegative == 1 ? -multiplier : multiplier);
else
out[i] = tbl_values[upper_pivot]/(isNegative == 1 ? -multiplier : multiplier);
else
if((tbl_values[pivot]-absnumber) < fabsf(tbl_values[lower_pivot]-absnumber))
out[i] = tbl_values[pivot]/(isNegative == 1 ? -multiplier : multiplier);
else
out[i] = tbl_values[lower_pivot]/(isNegative == 1 ? -multiplier : multiplier);
}
}
__global__ void kDecompression_8bit(float *flt_tbl, unsigned char *A, float precision, int size, float *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ float tbl_floats[256];
if(threadIdx.x < 126)
{
tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x]*precision;
tbl_floats[threadIdx.x+128] = -tbl_floats[threadIdx.x];
}
tbl_floats[126] = precision;
tbl_floats[254] = precision;//0.0f;
tbl_floats[127] = precision;
tbl_floats[255] = -precision;
__syncthreads();
for (int i = idx;i < size; i += numThreads)
{
out[i] = tbl_floats[A[i]];
}
}
__global__ void kCompression_8bit(float *flt_tbl, float *A, float precision, int size, unsigned char *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float absnumber = 0.0f;
float threshold_lower = 0.0000015;
float threshold_upper = 0.995703;
int isNegative = 0;
int pivot = 63;
int upper_pivot = 125;
int lower_pivot = 0;
__shared__ float tbl_floats[128];
if(threadIdx.x < 126)
tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x];
__syncthreads();
for (int i = idx;i < size; i += numThreads)
{
isNegative = 0;
pivot = 63;
upper_pivot = 125;
lower_pivot = 0;
absnumber = A[i]/precision;
if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; }
if(absnumber < threshold_lower){ out[i] = (unsigned char)254; continue; }
if(absnumber > threshold_upper){ out[i] = (isNegative == 0 ? (unsigned char)127 : (unsigned char)255); continue; }
for(int j = 32; j > 0; j>>=1)
{
if(absnumber > tbl_floats[pivot])
{
lower_pivot = pivot;
pivot+=j;
}
else
{
upper_pivot = pivot;
pivot-=j;
}
}
if(lower_pivot == pivot)
if(fabsf(tbl_floats[pivot]-absnumber) < (tbl_floats[upper_pivot]-absnumber))
if(isNegative == 1)
out[i] = pivot | 1 << 7;
else
out[i] = pivot;
else
if(isNegative == 1)
out[i] = upper_pivot | 1 << 7;
else
out[i] = upper_pivot;
else
if((tbl_floats[pivot]-absnumber) < fabsf(tbl_floats[lower_pivot]-absnumber))
if(isNegative == 1)
out[i] = (pivot | 1 << 7);
else
out[i] = pivot;
else
if(isNegative == 1)
out[i] = lower_pivot | 1 << 7;
else
out[i] = lower_pivot;
}
}
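/*
   Illustrative round-trip sketch for the 8-bit quantization pair above:
   kCompression_8bit maps each float to an index into the 126-entry lookup
   table (sign in the high bit, plus special codes for under/overflow), and
   kDecompression_8bit expands the codes back to floats.  d_flt_tbl is
   assumed to already hold that table and precision is typically the maximum
   absolute value of the data; the block size must be at least 126 so the
   shared-memory table gets filled.  All names below are hypothetical.
*/
static void quantize_roundtrip_sketch(float *d_flt_tbl, float *d_A,
                                      unsigned char *d_codes, float *d_out,
                                      float precision, int size)
{
	const int threads = 256;
	const int blocks  = (size + threads - 1) / threads;
	kCompression_8bit<<<blocks, threads>>>(d_flt_tbl, d_A, precision, size, d_codes);
	kDecompression_8bit<<<blocks, threads>>>(d_flt_tbl, d_codes, precision, size, d_out);
}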
__global__ void kCompression_8bit_float(float *flt_tbl, float *A, float precision, int size, float *out)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float absnumber = 0.0f;
float threshold_lower = 0.0000015;
float threshold_upper = 0.995703;
int isNegative = 0;
int pivot = 63;
int upper_pivot = 125;
int lower_pivot = 0;
__shared__ float tbl_floats[128];
if(threadIdx.x < 126)
tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x];
__syncthreads();
for (int i = idx;i < size; i += numThreads)
{
isNegative = 0;
pivot = 63;
upper_pivot = 125;
lower_pivot = 0;
absnumber = A[i]/precision;
if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; }
if(absnumber < threshold_lower){ out[i] = (unsigned char)126; continue; }
if(absnumber > threshold_upper){ out[i] = (isNegative == 0 ? (unsigned char)127 : (unsigned char)255); continue; }
for(int j = 32; j > 0; j>>=1)
{
if(absnumber > tbl_floats[pivot])
{
lower_pivot = pivot;
pivot+=j;
}
else
{
upper_pivot = pivot;
pivot-=j;
}
}
if(lower_pivot == pivot)
if(fabsf(tbl_floats[pivot]-absnumber) < (tbl_floats[upper_pivot]-absnumber))
if(isNegative == 1)
out[i] = pivot | 1 << 7;
else
out[i] = pivot;
else
if(isNegative == 1)
out[i] = upper_pivot | 1 << 7;
else
out[i] = upper_pivot;
else
if((tbl_floats[pivot]-absnumber) < fabsf(tbl_floats[lower_pivot]-absnumber))
if(isNegative == 1)
out[i] = (pivot | 1 << 7);
else
out[i] = pivot;
else
if(isNegative == 1)
out[i] = lower_pivot | 1 << 7;
else
out[i] = lower_pivot;
}
}
__global__ void kRMSprop_with_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
for (unsigned int i = idx;i < size; i += numThreads)
{
//grad_value = fdividef(grad[i],(float)batch_size) ;
grad_value = grad[i];
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
RMS[i] = RMS_value;
w[i] -= grad_value;
}
}
__global__ void kRMSprop_with_weight_update_8bit(float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float grad_value = 0.0f;
float RMS_value = 0.0f;
float rms_reciprocal = 1.0f - RMS_multiplier;
for (unsigned int i = idx;i < size; i += numThreads)
{
grad_value = fdividef(grad[i],(float)batch_size);
RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal);
grad[i] = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f));
RMS[i] = RMS_value;
}
}
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha)
{
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n)
{
/*
for(int i = 0; i < indptr[m+1];i++)
if(indices[i] > 23)
{
printf("ERROR: \n");
printf("%i \n", indices[i]);
printf("col: %i \n", col);
printf("row: %i \n", row);
}
*/
int max_idx = indptr[m]; // last valid entry of the CSR row pointer (indptr has m+1 entries), i.e. the number of non-zeros
for(int i = 0; i < m+1;i++)
if(indptr[i] > max_idx)
{
printf("ERROR: \n");
printf("%i \n", indptr[i]);
printf("max_idx: %i \n", max_idx);
}
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++)
{
/*
for(int a = start; a < end;a++)
if(indices[a] > 23)
{
printf("ERROR: \n");
printf("%i \n", indices[a]);
printf("a: %i \n", a);
}
*/
sum += data[i] * dense_data[(col * k) + indices[i]];
if(sum > 500000 || sum < -500000)
{
printf("start: %i ", start);
printf("end: %i ", end);
printf("i: %i ", i);
printf("k: %i ", k);
printf("col: %i ", col);
printf("data idx %i ", indices[i]);
printf("full idx %i ", (col * k) + indices[i]);
printf("data sparse %f ", data[i]);
printf("data dense %f ", dense_data[col * k + indices[i]]);
printf("data point %f ", data[i] * dense_data[col * k + indices[i]]);
printf(" sum %f\n", sum);
return;
}
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
__global__ void kPrintData(float *A, int size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
__syncthreads();
if(idx == 0)
printf("[");
for (unsigned int i = idx;i < size; i += numThreads)
printf("%f ",A[i]);
__syncthreads();
if(idx == 0)
printf("]\n");
}
__global__ void kMaxout(float *A, float *out, float *outargmax, int maxout_level, unsigned int cols, unsigned int rows)
{
__shared__ float max_values[32];
__shared__ float argmax_values[32];
float const min_value = -FLT_MAX;
for(int row = blockIdx.x; row < rows; row +=blockDim.x)
{
int softout_block_idx = row + (blockIdx.y*maxout_level*rows);
if(threadIdx.x < maxout_level)
{
max_values[threadIdx.x] = A[softout_block_idx+(threadIdx.x*rows)];
argmax_values[threadIdx.x] = (float)((blockIdx.y*maxout_level)+threadIdx.x);
}
else
{
max_values[threadIdx.x] = min_value;
argmax_values[threadIdx.x] = -1.0f;
}
//reduceToMax(max_values, threadIdx.x);
reduceToMaxAndArgMax(max_values, argmax_values, threadIdx.x, 32);
__syncthreads();
if(threadIdx.x == 0) out[row + (blockIdx.y*rows)] = max_values[0];
if(threadIdx.x == 1) outargmax[row + (blockIdx.y*rows)] = argmax_values[0];
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height)
{
extern __shared__ float max_vals[];
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
reduceToMax(max_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = max_vals[0];
}
}
__global__ void kExpandToMaxoutGrad(float* error, float* indexes, float *out, int error_size, int error_rows, int maxout_level)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const int grad_size = maxout_level*error_size;
for (unsigned int i = idx;i < grad_size; i += numThreads)
out[i] = 0.0f;
for (unsigned int i = idx;i < error_size; i += numThreads)
{
int row_idx = i - ((i / error_rows)*error_rows); // i % error_rows
out[row_idx + (((int)indexes[i])*error_rows)] = error[i];
}
}
__global__ void kConstructVocabMatrix(float *vocab_idx, float *vocab_idx_y, float* vocab, float *rdm_idx, float *batch_X, float *batch_Y)
{
int middleIdx = (gridDim.y/2);
int myIdx = 0;
int myRdmIdx = 0;
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
//middle index is replaced by rdm word for batch_Y, but we still need to write the correct word into batch_X!
if(blockIdx.y != middleIdx)
{
myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
vocab_idx_y[blockIdx.x+(blockIdx.y*gridDim.x)] = (float)myIdx;
}
else
{
myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
myRdmIdx = (int)rdm_idx[blockIdx.x];
vocab_idx_y[blockIdx.x+(blockIdx.y*gridDim.x)] = (float)myRdmIdx;
}
int myVocabIdx = blockDim.x*myIdx;
int myVocabRdmIdx = blockDim.x*myRdmIdx;
if(blockIdx.y != middleIdx)
{
batch_X[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x];
batch_Y[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x];
}
else
{
batch_X[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x];
batch_Y[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabRdmIdx + threadIdx.x];
}
}
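/*
   Illustrative launch sketch for kConstructVocabMatrix, spelling out the
   grid geometry described in the comments above: one block per
   (batch row, window position) pair and one thread per embedding dimension.
   The host-side names are hypothetical.
*/
static void construct_vocab_batch_sketch(float *d_vocab_idx, float *d_vocab_idx_y,
                                         float *d_vocab, float *d_rdm_idx,
                                         float *d_batch_X, float *d_batch_Y,
                                         int batch_size, int window_size,
                                         int vocab_vector_size)
{
	dim3 grid(batch_size, window_size);   // gridDim.x = batch_size, gridDim.y = window_size
	dim3 block(vocab_vector_size);        // blockDim.x = embedding dimension
	kConstructVocabMatrix<<<grid, block>>>(d_vocab_idx, d_vocab_idx_y, d_vocab,
	                                       d_rdm_idx, d_batch_X, d_batch_Y);
}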
__global__ void concat_batches(float **batch_X, float **batch_Y, float *out_X, float *out_Y)
{
//gridDim.z = matrix_count
//gridDim.y = batch size
//gridDim.x = window_size
//blockDim.x = partial vocab size
int full_vocab_size = gridDim.z*blockDim.x;
int cols = gridDim.x*full_vocab_size;
int partial_cols = blockDim.x*gridDim.x;
//full_size times current row = current row idx
//current window position times partial_threads times current matrix = current word idx
//threadIdx.x current parameter within a word
out_X[(blockIdx.y *cols) + (blockIdx.x*full_vocab_size) + (blockIdx.z*blockDim.x) +threadIdx.x] = batch_X[blockIdx.z][(blockIdx.y *partial_cols) + (blockIdx.x*blockDim.x) + threadIdx.x];
out_Y[(blockIdx.y *cols) + (blockIdx.x*full_vocab_size) + (blockIdx.z*blockDim.x) +threadIdx.x] = batch_Y[blockIdx.z][(blockIdx.y *partial_cols) + (blockIdx.x*blockDim.x) + threadIdx.x];
}
/*
//numerically unstable?
__global__ void kUpdateVocabWithGradient(float *grad, float *vocab_idx, float* vocab, float learning_rate)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
int myIdx = 0;
float multiplier = -fdividef(learning_rate,float(gridDim.x));
myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
//printf("%f ",grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier);
//printf("%f ",vocab[myVocabIdx + threadIdx.x]);
//printf("%f ",vocab[myVocabIdx + threadIdx.x]+ (grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier));
if(myIdx > 10000)
atomicAdd(&vocab[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier);
//vocab[myVocabIdx + threadIdx.x] +=grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)];
//printf("%s ",!isfinite(grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier));
}
*/
//numerically unstable?
__global__ void kUpdateVocabWithGradient(float *grad, float *vocab_idx, float* vocab, float learning_rate)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
atomicAdd(&vocab[myVocabIdx + threadIdx.x],-grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*learning_rate);
}
__global__ void kExpandDoubleVocabGradient(float *gradX, float *gradY, float *vocab_idx_X, float *vocab_idx_Y, float* vocab,
float *vocab_grad, float *vocab_grad_idx, float learning_rate, int grad_size)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
//float multiplier = fdividef(learning_rate,(float)(gridDim.x*2));
int myIdx_X = (int)vocab_idx_X[blockIdx.x+(blockIdx.y*gridDim.x)];
int myIdx_Y = (int)vocab_idx_Y[blockIdx.x+(blockIdx.y*gridDim.x)];
//int grad_cols = grad_size/blockDim.x;
int myVocabIdx_X = blockDim.x*myIdx_X;
int myVocabIdx_Y = blockDim.x*myIdx_Y;
atomicAdd(&vocab_grad[myVocabIdx_X + threadIdx.x],gradX[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]);
atomicAdd(&vocab_grad[myVocabIdx_Y + threadIdx.x],gradY[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]);
/*
vocab_grad_idx[myIdx_X] = 1.0f;
vocab_grad_idx[myIdx_Y] = 1.0f;
__syncthreads();
int block_idx = (blockIdx.y*gridDim.x) + blockIdx.x;
int threads_blocks = gridDim.x*gridDim.y;
for(int i = block_idx; i < grad_cols; i+=threads_blocks)
{
if(vocab_grad_idx[i] == 1.0f)
{
vocab[(i*blockDim.x) + threadIdx.x] -= vocab_grad[(i*blockDim.x) + threadIdx.x]*multiplier;
}
}
*/
}
/*
__global__ void kExpandVocabGradient_sharedMemory(float *grad, float *vocab_idx, float *vocab_grad, float *sorted_vocab_idx, vocab_idx_size)
{
//vocab_vector_size = blockDim.x;
//batch_size = gridDim.x
//try different configs for gridDim.x, e.g 16, 32 etc.
//will have vocab_vector_size = blockDim.x elements e.g. 64
extern __shared__ float sGrads[];
float myWordIdx = 0.0f;
float last_word = 0.0f;
float currentIdx = 0.0f;
sGrads[threadIdx.x] = 0.0f;
for(int word = blockIdx.x; currentIdx < vocab_idx_size; word++)
{
for(int i = currentIdx; i < vocab_idx_size; i++, currentIdx++)
{
}
}
}
*/
__global__ void kExpandVocabGradient(float *grad, float *vocab_idx, float *vocab_grad)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]);
}
__global__ void kExpandPartialVocabGradient(float *grad, float *vocab_idx, float *vocab_grad, int matrix_idx, int matrix_count)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
int offset = matrix_idx*gridDim.x*blockDim.x;
int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*(blockDim.x*matrix_count)*gridDim.x) + (threadIdx.x*gridDim.x) + offset]);
}
__global__ void kExpandVocabGradientMiddleWord(float *grad, float *vocab_idx, float *vocab_grad)
{
//vocab_vector_size = blockDim.x;
//vocab_idx_rows = batch_size = gridDim.x
//vocab_idx_cols = window_size = gridDim.y
if(blockIdx.x+(blockIdx.y*gridDim.x) == gridDim.y/2)
{
int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)];
int myVocabIdx = blockDim.x*myIdx;
atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]);
}
}
__global__ void kDot8bit(unsigned char *A, unsigned char *B, float *out, int rowsA, int colsA, int colsB, float *flt_tbl, float precisionA, float precisionB)
{
const unsigned int threads_per_block = blockDim.x*blockDim.y;
const int mygrid = blockIdx.x;
const int myidx = (threadIdx.y*blockDim.x)+threadIdx.x;
__shared__ float tbl_floatsA[256];
__shared__ float tbl_floatsB[256];
for(int i = myidx; i < 126; i++)
{
tbl_floatsA[i] = flt_tbl[i]*precisionA;
tbl_floatsA[i+128] = -tbl_floatsA[i];
tbl_floatsB[i] = flt_tbl[i]*precisionB;
tbl_floatsB[i+128] = -tbl_floatsB[i];
}
tbl_floatsA[126] = 0.0f;
tbl_floatsB[126] = 0.0f;
tbl_floatsA[127] = precisionA;
tbl_floatsB[127] = -precisionA;
tbl_floatsA[254] = -0.0f;
tbl_floatsB[254] = -0.0f;
tbl_floatsA[255] = precisionB;
tbl_floatsB[255] = -precisionB;
__syncthreads();
for(int Arow = mygrid; Arow < rowsA; Arow+=gridDim.x)
{
for(int Bcol = myidx; Bcol < colsB; Bcol+=threads_per_block)
{
int idxout = (Bcol*rowsA) + Arow;
for(int Acol = 0; Acol < colsA; Acol++)
out[idxout] += tbl_floatsA[A[(Acol*rowsA)+Arow]] * tbl_floatsB[B[(colsA*Bcol) + Acol]];
}
}
}
__global__ void kDot8bit_shared(unsigned char *A, unsigned char *B, float *out, int rowsA, int colsA, int colsB, float *flt_tbl, float precisionA, float precisionB)
{
int myidx = (threadIdx.y*blockDim.x)+threadIdx.x;
__shared__ unsigned char A_tile[64][256]; //64x32 banks
__shared__ unsigned char B_tile[64][256];//256x8 banks
__shared__ float tbl_floatsA[256];
__shared__ float tbl_floatsB[256];
for(int i = myidx; i < 126; i++)
{
tbl_floatsA[i] = flt_tbl[i]*precisionA;
tbl_floatsA[i+128] = -tbl_floatsA[i];
tbl_floatsB[i] = flt_tbl[i]*precisionB;
tbl_floatsB[i+128] = -tbl_floatsB[i];
}
tbl_floatsA[126] = 0.0f;
tbl_floatsB[126] = 0.0f;
tbl_floatsA[127] = precisionA;
tbl_floatsB[127] = -precisionA;
tbl_floatsA[254] = -0.0f;
tbl_floatsB[254] = -0.0f;
tbl_floatsA[255] = precisionB;
tbl_floatsB[255] = -precisionB;
__syncthreads();
int offset = 0;
myidx = threadIdx.y*16;
int Arow = threadIdx.x+(blockIdx.x*64);
int Acol = (threadIdx.y*16)+(blockIdx.y*256);
if(Arow < rowsA)
{
for(int i = 0; i < 16; i++){ A_tile[threadIdx.x][myidx+i] = A[((Acol+i)*rowsA)+ Arow]; }
for(int i = threadIdx.y; i < colsB; i+=blockDim.y){ out[((i)*rowsA) + Arow] = 0.0f; }
}
else
for(int i = 0; i < 16; i++){ A_tile[threadIdx.x][myidx+i] = 126; }
for(int Btile = 0 ; Btile < colsB; Btile+=64)
{
if(Btile+threadIdx.x < colsB)
{
for(int i = 0; i < 16; i++)
{
if(Acol+i < colsA)
B_tile[threadIdx.x][myidx+i] = B[((threadIdx.x + Btile)*colsA)+ Acol+i];//B_tile is transposed to avoid bank conflicts with 64 threads
else
B_tile[threadIdx.x][myidx+i] = 126;
}
}
else
{
for(int i = 0; i < 16; i++)
B_tile[threadIdx.x][myidx+i] = 126;//B_tile is transposed to avoid bank conflicts with 64 threads
}
__syncthreads();
for(int Bcol2 = offset; Bcol2 < 64 + offset; Bcol2++)
{
for (int i = 0; i < 16; ++i)
atomicAdd(&out[((Bcol2)*rowsA) + Arow],
tbl_floatsA[A_tile[threadIdx.x][myidx + i]] *
tbl_floatsB[B_tile[Bcol2-offset][myidx + i]]);
}
offset +=64;
}
}
__global__ void MatMul(float* A, float* B, float* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols)
{
float CValue = 0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows) As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else As[threadIdx.y][threadIdx.x] = 0.0;
if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols) Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else Bs[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
__syncthreads();
}
if (Row < CRows && Col < CCols) C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols)+(blockIdx.x*blockDim.x)+threadIdx.x]=CValue;
}
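/*
   Illustrative launch sketch for the tiled MatMul kernel above: one
   TILE_DIM x TILE_DIM thread block per output tile of the row-major matrix
   C = A * B, with A of size ARows x ACols and B of size ACols x BCols.
   TILE_DIM is assumed to be defined elsewhere in the project; the host-side
   names are hypothetical.
*/
static void matmul_sketch(float *d_A, float *d_B, float *d_C,
                          int ARows, int ACols, int BCols)
{
	dim3 block(TILE_DIM, TILE_DIM);
	dim3 grid((BCols + TILE_DIM - 1) / TILE_DIM,
	          (ARows + TILE_DIM - 1) / TILE_DIM);
	MatMul<<<grid, block>>>(d_A, d_B, d_C,
	                        ARows, ACols,   // A: ARows x ACols
	                        ACols, BCols,   // B: ACols x BCols
	                        ARows, BCols);  // C: ARows x BCols
}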
static __device__ void saxpy(float alpha, const float* b, float* c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
__global__ void sgemm_kernel_N_N_64_16_16_16_4(float* C,const float* A,const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta )
{
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx+idt >= m )
A += ibx+0;
else
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
B += tx+__mul24(iby, ldb);
/*
   These offsets keep the threads from making invalid memory accesses in
   dimension N: whenever a column index would fall outside the matrix, the
   access is redirected (wrapped around) to a location that is known to be
   valid, which acts as the stopping criterion.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1=0;
const float *Bend = B + k - k % 16;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
   Avoid invalid memory accesses in dimension K.
   If a thread enters this block, its first access to B is valid, since K is
   not divisible by blk_K.  Dimension N has already been handled through
   s1, s2, s3, s4, but depending on K and the thread index tx some accesses
   may still be out of range, so they are disabled here by setting
   s1 = s2 = s3 = s4 = 0.  B may also have been advanced past the end in the
   previous loop; that is corrected as well.  This handles the bottom-right
   corner of the matrix.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
   Now handle the parts of dimensions M and N that do not fit into full blocks.
*/
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
    lda = 0;     /* this thread's row lies past M, so nothing is written */
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
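/*
   Illustrative launch sketch for the MAGMA-style sgemm kernel above, which
   appears to assume a 16 x 4 thread block (64 threads, idt in [0, 64)) and
   one block per 64 x 16 tile of the column-major result C.  Leading
   dimensions equal to the matrix heights and all host-side names are
   assumptions.
*/
static void sgemm_NN_sketch(float *d_C, const float *d_A, const float *d_B,
                            int m, int n, int k, float alpha, float beta)
{
	dim3 block(16, 4);
	dim3 grid((m + 63) / 64, (n + 15) / 16);
	sgemm_kernel_N_N_64_16_16_16_4<<<grid, block>>>(d_C, d_A, d_B, m, n, k,
	                                                m /*lda*/, k /*ldb*/, m /*ldc*/,
	                                                alpha, beta);
}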
__global__ void sgemmNN( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int id = inx + iny*16;
A += ibx + id;
B += inx + __mul24( iby + iny, ldb );
C += ibx + id + __mul24( iby, ldc );
const float *Blast = B + k;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__shared__ float bs[16][17];
do
{
#pragma unroll
for( int i = 0; i < 16; i += 4 )
bs[inx][iny+i] = B[i*ldb];
__syncthreads();
#pragma unroll
for( int i = 0; i < 16; i++, A += lda )
saxpy( A[0], &bs[i][0], c );
B += 16;
__syncthreads();
} while( B < Blast );
for( int i = 0; i < 16; i++, C += ldc )
C[0] = alpha*c[i] + beta*C[0];
}
__global__ void sgemm_kernel_N_T_64_16_4_16_4(float* C, const float* A, const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
if ( iby + tx >= n )
B += iby + 0;
else
B += iby + tx;
/*
Taking care of boundary cases where K < 4.
*/
if ( ty >= k )
B += __mul24( 0, ldb );
else
B += __mul24( ty, ldb );
if ( ibx + idt >= m )
A += ibx + 0;
else
A += ibx + idt;
int s2=lda, s3=2*lda, s4=3*lda;
switch (k) {
case 1: s2=0; s3=0; s4=0; break;
case 2: s2=lda; s3=0; s4=0; break;
case 3: s2=lda; s3=2*lda; s4=0; break;
}
C += ibx + idt + __mul24( iby, ldc );
float Ap[4] = { A[0], A[s2], A[s3], A[s4] };
float b = B[0];
const float *Bend = B + ldb*(k - k % 4);
B += 4*ldb;
A += 4*lda;
__shared__ float Bb[4][16];
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 7 ) {
do {
float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]};
Bb[ty][tx]=b;
__syncthreads();
Ap[0] = A[0];
Ap[1] = A[s2];
Ap[2] = A[s3];
Ap[3] = A[s4];
b=B[0];
saxpy( Ab[0], &Bb[0][0], Cb );
saxpy( Ab[1], &Bb[1][0], Cb );
saxpy( Ab[2], &Bb[2][0], Cb );
saxpy( Ab[3], &Bb[3][0], Cb );
A += 4*lda;
B += 4*ldb;
__syncthreads();
} while (B < Bend);
}
if ( k > 3 ) {
Bb[ty][tx]=b;
int k1 = k - k % 4;
if ( (k1+ty) >= k )
B -= 4*ldb;
else
B -= 0*ldb;
if ( (k1+0) >= k ) {s2=0; s3=0*lda; s4=0; A -= 4*lda; } else
if ( (k1+1) >= k ) {s2=0; s3=0*lda; s4=0; A -= 0*lda; } else
if ( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda; } else
if ( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda; }
__syncthreads();
b=B[0];
saxpy( Ap[0], &Bb[0][0], Cb ); Ap[0] = A[0];
saxpy( Ap[1], &Bb[1][0], Cb ); Ap[1] = A[s2];
saxpy( Ap[2], &Bb[2][0], Cb ); Ap[2] = A[s3];
saxpy( Ap[3], &Bb[3][0], Cb ); Ap[3] = A[s4];
}
k = k % 4;
if ( k != 0 ) {
__syncthreads();
Bb[ty][tx]=b;
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( Ap[i], &Bb[i][0], Cb );
}
}
if ( (iby+16)>=n) {
lda = n-iby;
}
else{
lda = 16;
}
if ( (ibx+idt) >= m )
    lda = 0;     /* this thread's row lies past M, so nothing is written */
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
__global__ void sgemm_kernel_T_N_32_32_8_8_8(float* C, const float* A, const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta )
{
const int ibx = blockIdx.x * 32;
const int iby = blockIdx.y * 32;
const int tx = threadIdx.y;
const int ty = threadIdx.x;
int idt = tx*8 + ty;
if ( ty >= k )
A += __mul24(ibx, lda) + 0;
else
A += __mul24(ibx, lda) + ty;
if ( (ibx + tx) >= m )
A += __mul24(0, lda);
else
A += __mul24(tx, lda);
if ( (iby+tx) >= n )
B += __mul24(iby+0, ldb);
else
B += __mul24(iby+tx, ldb);
if ( ty >= k )
B += 0;
else
B += ty;
C += ibx + idt % 32 + __mul24( iby + 16*(idt/32), ldc );
lda = lda * 8;
ldb = ldb * 8;
int as1=0, as2=lda, as3=2*lda, as4=3*lda;
int bs1=0, bs2=ldb, bs3=2*ldb, bs4=3*ldb;
switch(k) {
case 1: as2=0; as3=0*lda; as4=0; bs2=0; bs3=0*ldb; bs4=0; break;
case 2: as2=lda; as3=0*lda; as4=0; bs2=ldb; bs3=0*ldb; bs4=0; break;
case 3: as2=lda; as3=2*lda; as4=0; bs2=ldb; bs3=2*ldb; bs4=0; break;
}
if ( (ibx + tx ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 8 ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 16) >= m ) { as1=0; as2=1*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 24) >= m ) { as1=0; as2=1*lda; as3=2*lda; as4=0*lda; }
if ( (iby + tx ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 8 ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 16) >= n ) { bs1=0; bs2=1*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 24) >= n ) { bs1=0; bs2=1*ldb; bs3=2*ldb; bs4=0*ldb; }
float b = B[bs1];
float b1 = B[bs2];
float b2 = B[bs3];
float b3 = B[bs4];
float Ap[4] = { A[as1], A[as2], A[as3], A[as4] };
const float *Bend = B + (k - k % 8);
B += 8;
A += 8;
__shared__ float Bb[8][33];
__shared__ float ABb[32][9];
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const int l = 17*(idt/32);
int idt1 = idt;
idt = idt % 32;
if ( k > 15 ) {
do {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
saxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1];
saxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2];
saxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3];
saxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4];
saxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1];
saxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2];
saxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3];
saxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4];
B += 8;
A += 8;
__syncthreads();
} while (B < Bend);
}
if ( k > 7 ) {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
as1 = k - k % 8;
if ( as1+ty >= k ) { bs1=0*ldb; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; B -= 8; }
if ( as1+ty >= k ) { as1=0*lda; as2=0*lda; as3=0*lda; as4=0*lda; A -= 8; }
as1=0;
saxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1];
saxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2];
saxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3];
saxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4];
saxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1];
saxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2];
saxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3];
saxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4];
}
k = k % 8;
if ( k != 0 ) {
__syncthreads();
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( ABb[idt][i], &Bb[i][l], Cb );
}
}
if ( (iby+16*(idt1/32+1)) >= n ) {
lda = n - iby - 16*(idt1/32);
}
else {
lda = 16;
}
if ( (ibx+idt) >= m )
    lda = 0;     /* this thread's row lies past M, so nothing is written */
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
|
b3448e48a5deb73fca803a66fc232b1cb77bc7a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Modifications: Jason Bunk, Jan. 2017
* convolutionNotSeparableGPU()
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "check_macros.h"
#include "convolutionTexture.h"
////////////////////////////////////////////////////////////////////////////////
// GPU-specific defines
////////////////////////////////////////////////////////////////////////////////
//Maps to a single instruction on G8x / G9x / G10x
//#define IMAD(a, b, c) ( __mul24((a), (b)) + (c) )
#define IMAD(a, b, c) ( ((a) * (b)) + (c) )
//Use unrolled innermost convolution loop
#define UNROLL_INNER 1
//Round a / b to nearest higher integer value
inline int iDivUp(int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
inline int iAlignUp(int a, int b) {
return (a % b != 0) ? (a - a % b + b) : a;
}
////////////////////////////////////////////////////////////////////////////////
// Convolution kernel and input array storage
////////////////////////////////////////////////////////////////////////////////
__constant__ float c_Kernel[KERNEL_LENGTH];
__constant__ float c_notsep_Kernel[KERNEL_LENGTH*KERNEL_LENGTH];
void setConvolutionKernel(float *h_Kernel) {
hipMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float));
}
void setNotSeparableConvolutionKernel(float *h_Kernel) {
hipMemcpyToSymbol(c_notsep_Kernel, h_Kernel, KERNEL_LENGTH * KERNEL_LENGTH * sizeof(float));
}
//texture<float, 2, hipReadModeElementType> texSrc;
// 2D float texture
texture<float, hipTextureType2D, hipReadModeElementType> texSrc;
void convolution_setTextureParams() {
// Set texture reference parameters
texSrc.addressMode[0] = hipAddressModeClamp; // clamp: out-of-range x repeats the edge column (AddressModeBorder would return zero instead)
texSrc.addressMode[1] = hipAddressModeClamp; // clamp: out-of-range y repeats the edge row
texSrc.filterMode = hipFilterModePoint; // disable interpolation (i.e. use "nearest-neighbor")
}
////////////////////////////////////////////////////////////////////////////////
// Loop unrolling templates, needed for best performance
////////////////////////////////////////////////////////////////////////////////
template<int i> __device__ float convolutionRow(float x, float y) {
return tex2D(texSrc, x + (float)(KERNEL_RADIUS - i), y) * c_Kernel[i] + convolutionRow<i - 1>(x, y);
}
template<> __device__ float convolutionRow<-1>(float x, float y) {
return 0;
}
template<int i> __device__ float convolutionColumn(float x, float y) {
return tex2D(texSrc, x, y + (float)(KERNEL_RADIUS - i)) * c_Kernel[i] + convolutionColumn<i - 1>(x, y);
}
template<> __device__ float convolutionColumn<-1>(float x, float y) {
return 0;
}
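// The templates above unroll the 1-D convolution at compile time: instantiating
// convolutionRow<2*KERNEL_RADIUS> recursively expands into one fused tex2D
// multiply-add per kernel tap, and the <-1> specializations terminate the recursion.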
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowsKernel(
float *d_Dst,
int imageW,
int imageH
) {
const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x);
const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y);
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if (ix >= imageW || iy >= imageH) {
return;
}
float sum = 0;
#if(UNROLL_INNER)
sum = convolutionRow<2 *KERNEL_RADIUS>(x, y);
#else
for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++) {
sum += tex2D(texSrc, x + (float)k, y) * c_Kernel[KERNEL_RADIUS - k];
}
#endif
d_Dst[IMAD(iy, imageW, ix)] = sum;
}
void convolutionRowsGPU(
float *d_Dst,
hipArray *a_Src,
int imageW,
int imageH
) {
dim3 threads(16, 12);
dim3 blocks(iDivUp(imageW, threads.x), iDivUp(imageH, threads.y));
CUDA_CHECK(hipBindTextureToArray(texSrc, a_Src));
hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0,
d_Dst,
imageW,
imageH
);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipUnbindTexture(texSrc));
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnsKernel(
float *d_Dst,
int imageW,
int imageH
) {
const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x);
const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y);
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if (ix >= imageW || iy >= imageH) {
return;
}
float sum = 0;
#if(UNROLL_INNER)
sum = convolutionColumn<2 *KERNEL_RADIUS>(x, y);
#else
for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++) {
sum += tex2D(texSrc, x, y + (float)k) * c_Kernel[KERNEL_RADIUS - k];
}
#endif
d_Dst[IMAD(iy, imageW, ix)] = sum;
}
void convolutionColumnsGPU(
float *d_Dst,
hipArray *a_Src,
int imageW,
int imageH
) {
dim3 threads(16, 12);
dim3 blocks(iDivUp(imageW, threads.x), iDivUp(imageH, threads.y));
CUDA_CHECK(hipBindTextureToArray(texSrc, a_Src));
hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0,
d_Dst,
imageW,
imageH
);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipUnbindTexture(texSrc));
}
////////////////////////////////////////////////////////////////////////////////
// Not-separable convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionNotSeparableKernel(
float *d_Dst,
int imageW,
int imageH,
float normalization
) {
const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x);
const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y);
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if (ix >= imageW || iy >= imageH) {
return;
}
float sum = 0;
for(int ii = -KERNEL_RADIUS; ii <= KERNEL_RADIUS; ++ii) {
for(int jj = -KERNEL_RADIUS; jj <= KERNEL_RADIUS; ++jj) {
// technically cross-correlation so don't use with asymmetric kernels
sum += tex2D(texSrc, x + static_cast<float>(ii), y + static_cast<float>(jj))
* c_notsep_Kernel[(ii+KERNEL_RADIUS) * KERNEL_LENGTH + (jj+KERNEL_RADIUS)];
}
}
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
d_Dst[IMAD(iy, imageW, ix)] += (sqrtf(fabs(sum)) / normalization);
}
void convolutionNotSeparableGPU(
float *d_Dst,
hipArray *a_Src,
int imageW,
int imageH,
float normalization
) {
dim3 threads(16, 12);
dim3 blocks(iDivUp(imageW, threads.x), iDivUp(imageH, threads.y));
CUDA_CHECK(hipBindTextureToArray(texSrc, a_Src));
convolution_setTextureParams();
hipLaunchKernelGGL(( convolutionNotSeparableKernel), dim3(blocks), dim3(threads), 0, 0,
d_Dst,
imageW,
imageH,
normalization
);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipUnbindTexture(texSrc));
}
|
b3448e48a5deb73fca803a66fc232b1cb77bc7a7.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Modifications: Jason Bunk, Jan. 2017
* convolutionNotSeparableGPU()
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "check_macros.h"
#include "convolutionTexture.h"
////////////////////////////////////////////////////////////////////////////////
// GPU-specific defines
////////////////////////////////////////////////////////////////////////////////
//Maps to a single instruction on G8x / G9x / G10x
//#define IMAD(a, b, c) ( __mul24((a), (b)) + (c) )
#define IMAD(a, b, c) ( ((a) * (b)) + (c) )
//Use unrolled innermost convolution loop
#define UNROLL_INNER 1
//Round a / b to nearest higher integer value
inline int iDivUp(int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
inline int iAlignUp(int a, int b) {
return (a % b != 0) ? (a - a % b + b) : a;
}
////////////////////////////////////////////////////////////////////////////////
// Convolution kernel and input array storage
////////////////////////////////////////////////////////////////////////////////
__constant__ float c_Kernel[KERNEL_LENGTH];
__constant__ float c_notsep_Kernel[KERNEL_LENGTH*KERNEL_LENGTH];
void setConvolutionKernel(float *h_Kernel) {
cudaMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float));
}
void setNotSeparableConvolutionKernel(float *h_Kernel) {
cudaMemcpyToSymbol(c_notsep_Kernel, h_Kernel, KERNEL_LENGTH * KERNEL_LENGTH * sizeof(float));
}
//texture<float, 2, cudaReadModeElementType> texSrc;
// 2D float texture
texture<float, cudaTextureType2D, cudaReadModeElementType> texSrc;
void convolution_setTextureParams() {
// Set texture reference parameters
texSrc.addressMode[0] = cudaAddressModeClamp; // AddressModeBorder == zero outside the texture
texSrc.addressMode[1] = cudaAddressModeClamp; // AddressModeClamp == last row or column is repeated
texSrc.filterMode = cudaFilterModePoint; // disable interpolation (i.e. use "nearest-neighbor")
}
////////////////////////////////////////////////////////////////////////////////
// Loop unrolling templates, needed for best performance
////////////////////////////////////////////////////////////////////////////////
template<int i> __device__ float convolutionRow(float x, float y) {
return tex2D(texSrc, x + (float)(KERNEL_RADIUS - i), y) * c_Kernel[i] + convolutionRow<i - 1>(x, y);
}
template<> __device__ float convolutionRow<-1>(float x, float y) {
return 0;
}
template<int i> __device__ float convolutionColumn(float x, float y) {
return tex2D(texSrc, x, y + (float)(KERNEL_RADIUS - i)) * c_Kernel[i] + convolutionColumn<i - 1>(x, y);
}
template<> __device__ float convolutionColumn<-1>(float x, float y) {
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowsKernel(
float *d_Dst,
int imageW,
int imageH
) {
const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x);
const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y);
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if (ix >= imageW || iy >= imageH) {
return;
}
float sum = 0;
#if(UNROLL_INNER)
sum = convolutionRow<2 *KERNEL_RADIUS>(x, y);
#else
for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++) {
sum += tex2D(texSrc, x + (float)k, y) * c_Kernel[KERNEL_RADIUS - k];
}
#endif
d_Dst[IMAD(iy, imageW, ix)] = sum;
}
void convolutionRowsGPU(
float *d_Dst,
cudaArray *a_Src,
int imageW,
int imageH
) {
dim3 threads(16, 12);
dim3 blocks(iDivUp(imageW, threads.x), iDivUp(imageH, threads.y));
CUDA_CHECK(cudaBindTextureToArray(texSrc, a_Src));
convolutionRowsKernel<<<blocks, threads>>>(
d_Dst,
imageW,
imageH
);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaUnbindTexture(texSrc));
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnsKernel(
float *d_Dst,
int imageW,
int imageH
) {
const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x);
const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y);
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if (ix >= imageW || iy >= imageH) {
return;
}
float sum = 0;
#if(UNROLL_INNER)
sum = convolutionColumn<2 *KERNEL_RADIUS>(x, y);
#else
for (int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++) {
sum += tex2D(texSrc, x, y + (float)k) * c_Kernel[KERNEL_RADIUS - k];
}
#endif
d_Dst[IMAD(iy, imageW, ix)] = sum;
}
void convolutionColumnsGPU(
float *d_Dst,
cudaArray *a_Src,
int imageW,
int imageH
) {
dim3 threads(16, 12);
dim3 blocks(iDivUp(imageW, threads.x), iDivUp(imageH, threads.y));
CUDA_CHECK(cudaBindTextureToArray(texSrc, a_Src));
convolutionColumnsKernel<<<blocks, threads>>>(
d_Dst,
imageW,
imageH
);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaUnbindTexture(texSrc));
}
////////////////////////////////////////////////////////////////////////////////
// Not-separable convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionNotSeparableKernel(
float *d_Dst,
int imageW,
int imageH,
float normalization
) {
const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x);
const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y);
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if (ix >= imageW || iy >= imageH) {
return;
}
float sum = 0;
for(int ii = -KERNEL_RADIUS; ii <= KERNEL_RADIUS; ++ii) {
for(int jj = -KERNEL_RADIUS; jj <= KERNEL_RADIUS; ++jj) {
// technically cross-correlation so don't use with asymmetric kernels
sum += tex2D(texSrc, x + static_cast<float>(ii), y + static_cast<float>(jj))
* c_notsep_Kernel[(ii+KERNEL_RADIUS) * KERNEL_LENGTH + (jj+KERNEL_RADIUS)];
}
}
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
d_Dst[IMAD(iy, imageW, ix)] += (sqrtf(fabs(sum)) / normalization);
}
void convolutionNotSeparableGPU(
float *d_Dst,
cudaArray *a_Src,
int imageW,
int imageH,
float normalization
) {
dim3 threads(16, 12);
dim3 blocks(iDivUp(imageW, threads.x), iDivUp(imageH, threads.y));
CUDA_CHECK(cudaBindTextureToArray(texSrc, a_Src));
convolution_setTextureParams();
convolutionNotSeparableKernel<<<blocks, threads>>>(
d_Dst,
imageW,
imageH,
normalization
);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaUnbindTexture(texSrc));
}
|
70cce8c37732cecb3b8c3d6b111f7669a0588fc0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_a;
int xdim0_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_a;
int ydim0_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_a;
int xdim1_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_a;
int ydim1_update_halo_kernel5_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_4_a * (y) + \
xdim0_update_halo_kernel5_plus_4_a * ydim0_update_halo_kernel5_plus_4_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_4_a * (y) + \
xdim1_update_halo_kernel5_plus_4_a * ydim1_update_halo_kernel5_plus_4_a * \
(z))
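// OPS_ACC0/OPS_ACC1 map a relative (x, y, z) stencil offset to a linear index:
// x + xdim * y + xdim * ydim * z, i.e. row-major storage using the padded x/y
// extents of the corresponding dat.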
// user function
__device__
inline void
update_halo_kernel5_plus_4_a_gpu(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = vol_flux_z[OPS_ACC0(0, 4, 0)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = mass_flux_z[OPS_ACC1(0, 4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_a *
ydim0_update_halo_kernel5_plus_4_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_a *
ydim1_update_halo_kernel5_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_plus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 128))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(128, "update_halo_kernel5_plus_4_a");
OPS_kernels[128].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_a_h ||
ydim0 != ydim0_update_halo_kernel5_plus_4_a_h ||
xdim1 != xdim1_update_halo_kernel5_plus_4_a_h ||
ydim1 != ydim1_update_halo_kernel5_plus_4_a_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel5_plus_4_a_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel5_plus_4_a_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel5_plus_4_a_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel5_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[128].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_a), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[128].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[128].mpi_time += t2 - t1;
OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
70cce8c37732cecb3b8c3d6b111f7669a0588fc0.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_a;
int xdim0_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_a;
int ydim0_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_a;
int xdim1_update_halo_kernel5_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_a;
int ydim1_update_halo_kernel5_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_4_a * (y) + \
xdim0_update_halo_kernel5_plus_4_a * ydim0_update_halo_kernel5_plus_4_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_4_a * (y) + \
xdim1_update_halo_kernel5_plus_4_a * ydim1_update_halo_kernel5_plus_4_a * \
(z))
// user function
__device__
inline void
update_halo_kernel5_plus_4_a_gpu(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = vol_flux_z[OPS_ACC0(0, 4, 0)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = mass_flux_z[OPS_ACC1(0, 4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_a *
ydim0_update_halo_kernel5_plus_4_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_a *
ydim1_update_halo_kernel5_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_plus_4_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 128))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(128, "update_halo_kernel5_plus_4_a");
OPS_kernels[128].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_a_h ||
ydim0 != ydim0_update_halo_kernel5_plus_4_a_h ||
xdim1 != xdim1_update_halo_kernel5_plus_4_a_h ||
ydim1 != ydim1_update_halo_kernel5_plus_4_a_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel5_plus_4_a_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel5_plus_4_a_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel5_plus_4_a_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel5_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[128].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_plus_4_a<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[128].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[128].mpi_time += t2 - t1;
OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[128].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
fed0ac19624a3a262a27995dd4d17703ab781a09.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "SteerForWander.h"
#include <hip/hip_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
__global__ void
steerForWander2DKernel(VehicleData *vehicleData, float *random, float dt, float3 *steeringVectors, float2 *wanderData, float weight, kernel_options options);
OpenSteer::SteerForWander::SteerForWander(float weight, kernel_options options)
{
d_randomNumbers = NULL;
d_wanderData = NULL;
randomizedVector = NULL;
threadsPerBlock = 128;
this->weight = weight;
this->options = options;
}
OpenSteer::SteerForWander::~SteerForWander() {}
void OpenSteer::SteerForWander::init()
{
// random number generator
randomizedVector = new RandomizedVector(2*getNumberOfAgents());
// device memory for wander data
mem_size_wander = getNumberOfAgents()*sizeof(float2);
hipError_t retval = hipMalloc((void **)&d_wanderData, mem_size_wander);
if (retval != hipSuccess)
cout << "Error while allocating d_wanderData memory: " << hipGetErrorString(retval) << endl;
// d_wanderData memset
hipMemset(d_wanderData, 0, mem_size_wander);
// device memory for random numbers
mem_size_random = randomizedVector->size() * sizeof(float);
retval = hipMalloc((void **)&d_randomNumbers, mem_size_random);
if (retval != hipSuccess)
cout << "Error while allocating d_randomNumbers memory: " << hipGetErrorString(retval) << endl;
}
void OpenSteer::SteerForWander::run()
{
// renew random numbers
randomizedVector->renew();
hipMemcpy(d_randomNumbers, randomizedVector->getVector(), mem_size_random, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( steerForWander2DKernel), dim3(gridDim()), dim3(blockDim()), 0, 0, getVehicleData(), d_randomNumbers, getElapsedTime(), getSteeringVectors(), d_wanderData, weight, options);
}
void OpenSteer::SteerForWander::close()
{
if (d_wanderData != NULL) {
hipFree(d_wanderData);
d_wanderData = NULL;
}
if (d_randomNumbers != NULL) {
hipFree(d_randomNumbers);
d_randomNumbers = NULL;
}
if (randomizedVector != NULL) {
delete randomizedVector;
randomizedVector = NULL;
}
}
|
fed0ac19624a3a262a27995dd4d17703ab781a09.cu
|
#include "SteerForWander.h"
#include <cuda_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "CUDAKernelOptions.cu"
#include <iostream>
using namespace OpenSteer;
using namespace std;
__global__ void
steerForWander2DKernel(VehicleData *vehicleData, float *random, float dt, float3 *steeringVectors, float2 *wanderData, float weight, kernel_options options);
OpenSteer::SteerForWander::SteerForWander(float weight, kernel_options options)
{
d_randomNumbers = NULL;
d_wanderData = NULL;
randomizedVector = NULL;
threadsPerBlock = 128;
this->weight = weight;
this->options = options;
}
OpenSteer::SteerForWander::~SteerForWander() {}
void OpenSteer::SteerForWander::init()
{
// random number generator
randomizedVector = new RandomizedVector(2*getNumberOfAgents());
// device memory for wander data
mem_size_wander = getNumberOfAgents()*sizeof(float2);
cudaError_t retval = cudaMalloc((void **)&d_wanderData, mem_size_wander);
if (retval != cudaSuccess)
cout << "Error while allocating d_wanderData memory: " << cudaGetErrorString(retval) << endl;
// d_wanderData memset
cudaMemset(d_wanderData, 0, mem_size_wander);
// device memory for random numbers
mem_size_random = randomizedVector->size() * sizeof(float);
retval = cudaMalloc((void **)&d_randomNumbers, mem_size_random);
if (retval != cudaSuccess)
cout << "Error while allocating d_randomNumbers memory: " << cudaGetErrorString(retval) << endl;
}
void OpenSteer::SteerForWander::run()
{
// renew random numbers
randomizedVector->renew();
cudaMemcpy(d_randomNumbers, randomizedVector->getVector(), mem_size_random, cudaMemcpyHostToDevice);
steerForWander2DKernel<<<gridDim(), blockDim()>>>(getVehicleData(), d_randomNumbers, getElapsedTime(), getSteeringVectors(), d_wanderData, weight, options);
}
void OpenSteer::SteerForWander::close()
{
if (d_wanderData != NULL) {
cudaFree(d_wanderData);
d_wanderData = NULL;
}
if (d_randomNumbers != NULL) {
cudaFree(d_randomNumbers);
d_randomNumbers = NULL;
}
if (randomizedVector != NULL) {
delete randomizedVector;
randomizedVector = NULL;
}
}
|
f4484c2fe3b5718f8ca96cafd3f4cb80976713cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
#include "../include/ContAcq-IntClk.h"
// includes CUDA
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 4
#define ASSOC 24
#define SIMD_WIDTH 32
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
#define ITERATIONS REPLACE_ITERATIONS
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
texture<float,1,hipReadModeElementType> texmem5;
texture<float,1,hipReadModeElementType> texmem6;
texture<float,1,hipReadModeElementType> texmem7;
texture<float,1,hipReadModeElementType> texmem9;
texture<float,1,hipReadModeElementType> texmem8;
__global__ void tex_bm_kernel( float* out, unsigned size)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
float Value=0;float Value1=0;float Value2=0;float Value3=0;float Value4=0;float Value5=0;
if(tid < size){
for(unsigned i=0; i<ITERATIONS; ++i){
Value1 = tex1Dfetch(texmem1,tid) + Value5;
Value2 = tex1Dfetch(texmem2,tid) + Value4;
Value3 = tex1Dfetch(texmem3,tid) + Value3;
Value4 = tex1Dfetch(texmem4,tid) + Value1;
Value5 = tex1Dfetch(texmem5,tid) + Value2;
Value+=i+Value5+Value3;
}
}
__syncthreads();
out[tid]=Value;
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
int texmem_size = LINE_SIZE*SETS*ASSOC;
float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
for (int i=0; i< texmem_size; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
float *device_texture5;
float *device_texture6;
float *device_texture7;
float *device_texture8;
float *device_texture9;
float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
float *device_out;
hipMalloc((void**) &device_texture1, texmem_size);
hipMalloc((void**) &device_texture2, texmem_size);
hipMalloc((void**) &device_texture3, texmem_size);
hipMalloc((void**) &device_texture4, texmem_size);
hipMalloc((void**) &device_texture5, texmem_size);
hipMalloc((void**) &device_texture6, texmem_size);
hipMalloc((void**) &device_texture7, texmem_size);
hipMalloc((void**) &device_texture8, texmem_size);
hipMalloc((void**) &device_texture9, texmem_size);
hipMalloc((void**) &device_out, texmem_size*10);
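    // Note (editorial): the nine texture buffers above are allocated with texmem_size
    // bytes, but the hipMemcpy calls below copy texmem_size*sizeof(float) bytes into
    // them. Unless this is intentional, they are under-allocated by 4x; a safer sketch
    // would be hipMalloc((void**)&device_texture1, texmem_size*sizeof(float)).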
hipMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, texmem_size);
hipBindTexture(0, texmem2, device_texture2, texmem_size);
hipBindTexture(0, texmem3, device_texture3, texmem_size);
hipBindTexture(0, texmem4, device_texture4, texmem_size);
hipBindTexture(0, texmem5, device_texture5, texmem_size);
hipBindTexture(0, texmem6, device_texture6, texmem_size);
hipBindTexture(0, texmem7, device_texture7, texmem_size);
hipBindTexture(0, texmem8, device_texture8, texmem_size);
hipBindTexture(0, texmem9, device_texture9, texmem_size);
unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
dim3 grid( num_blocks, 1, 1);
dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( tex_bm_kernel), dim3(grid), dim3(threads), 0 , 0, device_out, texmem_size);
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
printf("Kernel DONE, probably correctly\n");
hipMemcpy(host_out, device_out, texmem_size*sizeof(float), hipMemcpyDeviceToHost);
/*
printf("Output: ");
float error = false;
for (int i=0; i< texmem_size; i++){
printf("%.1f ", host_out[i]);
if (host_out[i] - i > 0.0001) error = true;
}
printf("\n");
if (error) printf("\nFAILED\n");
else printf("\nPASSED\n");
*/
}
void CleanupResources(void){
// Free device memory
}
// Fills an integer array with values derived from rand() (note: rand() / RAND_MAX is integer division).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
f4484c2fe3b5718f8ca96cafd3f4cb80976713cb.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include <string.h>
#include <cuda.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
#include "../include/ContAcq-IntClk.h"
// includes CUDA
#include <cuda_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 4
#define ASSOC 24
#define SIMD_WIDTH 32
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
#define ITERATIONS REPLACE_ITERATIONS
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
texture<float,1,cudaReadModeElementType> texmem5;
texture<float,1,cudaReadModeElementType> texmem6;
texture<float,1,cudaReadModeElementType> texmem7;
texture<float,1,cudaReadModeElementType> texmem9;
texture<float,1,cudaReadModeElementType> texmem8;
__global__ void tex_bm_kernel( float* out, unsigned size)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
float Value=0;float Value1=0;float Value2=0;float Value3=0;float Value4=0;float Value5=0;
if(tid < size){
for(unsigned i=0; i<ITERATIONS; ++i){
Value1 = tex1Dfetch(texmem1,tid) + Value5;
Value2 = tex1Dfetch(texmem2,tid) + Value4;
Value3 = tex1Dfetch(texmem3,tid) + Value3;
Value4 = tex1Dfetch(texmem4,tid) + Value1;
Value5 = tex1Dfetch(texmem5,tid) + Value2;
Value+=i+Value5+Value3;
}
}
__syncthreads();
out[tid]=Value;
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
int texmem_size = LINE_SIZE*SETS*ASSOC;
float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
for (int i=0; i< texmem_size; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
float *device_texture5;
float *device_texture6;
float *device_texture7;
float *device_texture8;
float *device_texture9;
float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
float *device_out;
cudaMalloc((void**) &device_texture1, texmem_size);
cudaMalloc((void**) &device_texture2, texmem_size);
cudaMalloc((void**) &device_texture3, texmem_size);
cudaMalloc((void**) &device_texture4, texmem_size);
cudaMalloc((void**) &device_texture5, texmem_size);
cudaMalloc((void**) &device_texture6, texmem_size);
cudaMalloc((void**) &device_texture7, texmem_size);
cudaMalloc((void**) &device_texture8, texmem_size);
cudaMalloc((void**) &device_texture9, texmem_size);
cudaMalloc((void**) &device_out, texmem_size*10);
cudaMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, texmem_size);
cudaBindTexture(0, texmem2, device_texture2, texmem_size);
cudaBindTexture(0, texmem3, device_texture3, texmem_size);
cudaBindTexture(0, texmem4, device_texture4, texmem_size);
cudaBindTexture(0, texmem5, device_texture5, texmem_size);
cudaBindTexture(0, texmem6, device_texture6, texmem_size);
cudaBindTexture(0, texmem7, device_texture7, texmem_size);
cudaBindTexture(0, texmem8, device_texture8, texmem_size);
cudaBindTexture(0, texmem9, device_texture9, texmem_size);
unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
dim3 grid( num_blocks, 1, 1);
dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
tex_bm_kernel<<< grid, threads, 0 >>>(device_out, texmem_size);
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
printf("Kernel DONE, probably correctly\n");
cudaMemcpy(host_out, device_out, texmem_size*sizeof(float), cudaMemcpyDeviceToHost);
/*
printf("Output: ");
float error = false;
for (int i=0; i< texmem_size; i++){
printf("%.1f ", host_out[i]);
if (host_out[i] - i > 0.0001) error = true;
}
printf("\n");
if (error) printf("\nFAILED\n");
else printf("\nPASSED\n");
*/
}
void CleanupResources(void){
// Free device memory
}
// Fills an integer array with values derived from rand() (note: rand() / RAND_MAX is integer division).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
ff6d14d1d36ec8260ad2e2dd7a8612bc79dc69f2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
 * 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Attention Example.
This workload computes a fused multi head attention.
Because it keeps the attention matrix in shared memory, it's both faster and
uses less global memory.
This is based on `"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_,
and very similar to `"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness" <https://arxiv.org/abs/2205.14135>`_.
Algorithm:
In short, we can compute the output incrementally in blocks of size B,
we just need to divide the final result by the sum of all coefficients in
the softmax (which we compute incrementally) with the following pseudo-code:
```
s_prime = torch.zeros([num_queries, B])
O = torch.zeros([num_queries, head_size_v])
for i in range(0, K.shape[0], B):
si = exp((Q . K[i * B:(i+1) * B].t) * scale)
      s_prime += si.sum(-1)
O += si . V[i * B:(i+1) * B]
O = O / s_prime
```
In practice, and for numerical stability reasons,
    we also subtract the maximum so far (`mi`) before doing
the exponential. When we encounter new keys, the maximum
used to compute O so far (`m_prime`) can differ from the
current maximum, so we update O before accumulating with
```
O = O * exp(m_prime - mi)
m_prime = mi
```
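    A tiny numeric illustration (editorial, not part of the kernel): suppose the first
    key block gives a running row maximum m_prime = 2 and the second block raises it to
    mi = 5. The partial O and s_prime accumulated with the old maximum are both rescaled
    by exp(2 - 5) before the new exp(s - 5) terms are added, so the final O / s_prime
    equals the exact softmax-weighted sum of the values.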
Implementation details:
- `si` is stored in shared memory between the 2 back to back gemms
- we keep and accumulate the output
directly in registers if we can (`head_size_v <= 128`).
Otherwise, we store it & accumulate in global memory (slower)
- blocks are parallelized across the batch dimension, the number
of heads, and the query sequence size
Examples:
# Run an attention example with default setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen
# Run an attention example with custom setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen --head_number=2 --batch_size=3 --head_size=32 --head_size_v=64 --seq_length=512 --seq_length_kv=1024 --causal=true
Acknowledgement: Fixed-sequence-length FMHA code was upstreamed by Meta xFormers (https://github.com/facebookresearch/xformers).
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor.h"
#include "cutlass/fast_math.h"
#include "kernel_forward.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
hipError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
hipError_t error = hipSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
bool use_mask;
bool causal;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0_real;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1_real;
int alignment;
int head_number;
int batch_size;
int head_size;
int head_size_v;
int seq_length;
int seq_length_kv;
int iterations;
// alpha0, alpha1 and beta are fixed
// in this multi-head attention example
float alpha0;
float alpha1;
float beta;
//
// Methods
//
Options():
help(false),
error(false),
alignment(1),
reference_check(true),
head_number(12),
batch_size(16),
head_size(64),
head_size_v(64),
seq_length(1024),
seq_length_kv(1024),
use_mask(false),
iterations(20),
causal(false)
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alignment", alignment, 1);
cmd.get_cmd_line_argument("head_number", head_number, 12);
cmd.get_cmd_line_argument("batch_size", batch_size, 16);
cmd.get_cmd_line_argument("head_size", head_size, 64);
cmd.get_cmd_line_argument("head_size_v", head_size_v, head_size);
cmd.get_cmd_line_argument("seq_length", seq_length, 1024);
cmd.get_cmd_line_argument("seq_length_kv", seq_length_kv, seq_length);
cmd.get_cmd_line_argument("use_mask", use_mask, false);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("causal", causal, true);
randomize_problems();
}
void randomize_problems() {
int problem_count = head_number * batch_size;
problem_sizes0.reserve(problem_count);
problem_sizes1.reserve(problem_count);
// When using mask, the original inputs are not padded
    // and we need to save this information.
if (use_mask) {
problem_sizes0_real.reserve(problem_count);
problem_sizes1_real.reserve(problem_count);
}
for (int i = 0; i < batch_size; ++i) {
// problems belonging to the same batch share the same seq len
int m_real = seq_length;
int mkv_real = seq_length_kv;
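      // Round the real sequence lengths up to the next multiple of `alignment`
      // (e.g. alignment 8 turns m_real = 1021 into m = 1024); the *_real sizes are
      // kept separately when masking so the padding can be masked out in the softmax.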
int m = (m_real + alignment - 1) / alignment * alignment;
int mkv = (mkv_real + alignment - 1) / alignment * alignment;
int k0 = head_size;
int k1 = head_size_v;
for (int j = 0; j < head_number; ++j) {
cutlass::gemm::GemmCoord problem0(m, mkv, k0);
cutlass::gemm::GemmCoord problem1(m, k1, mkv);
problem_sizes0.push_back(problem0);
problem_sizes1.push_back(problem1);
if (use_mask) {
cutlass::gemm::GemmCoord problem0_real(m_real, mkv_real, k0);
cutlass::gemm::GemmCoord problem1_real(m_real, k1, mkv_real);
problem_sizes0_real.push_back(problem0_real);
problem_sizes1_real.push_back(problem1_real);
}
}
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "41_fused_multi_head_attention_fixed_seqlen\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --head_number=<int> Head number in multi-head attention (default: --head_number=12)\n"
<< " --batch_size=<int> Batch size in multi-head attention (default: --batch_size=16)\n"
<< " --head_size=<int> Head size in multi-head attention (default: --head_size=64)\n"
<< " --head_size_v=<int> Head size in multi-head attention for V (default: --head_size_v=head_size)\n"
<< " --seq_length=<int> Sequence length in multi-head attention for Q (default: --seq_length=1024)\n"
<< " --seq_length_kv=<int> Sequence length in multi-head attention for K/V (default: --seq_length_kv=seq_length)\n"
<< " --use_mask=<bool> If true, performs padding-like masking in softmax.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n"
<< " --causal=<bool> If true, uses causal masking.\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
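    // Per query row this counts: 2*N*K flops for Q.K^T, roughly 3*N for the softmax
    // (max-subtract/exp plus the row sum), 2*N*Kv for P.V and N*Kv for the final O/S
    // scaling, where N shrinks to min(row+1, N) under causal masking.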
int64_t fops = int64_t();
for (int i = 0; i < problem_sizes0.size(); ++i) {
auto const& problem0 = problem_sizes0[i];
auto const& problem1 = problem_sizes1[i];
for (int row = 0; row < problem0.m(); ++row) {
int num_cols0 = problem0.n();
if (causal) {
num_cols0 = ::min(row + 1, num_cols0);
}
// P <- Q . K_t
fops += 2 * num_cols0 * problem0.k();
// P <- exp(P - max(P))
fops += 2 * num_cols0;
// S <- sum(P)
fops += num_cols0 - 1;
// O <- P . V
fops += 2 * num_cols0 * problem1.n();
// O <- O / S
fops += num_cols0 * problem1.n();
}
}
return double(fops) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Attention>
class TestbedAttention {
public:
//
// Type definitions
//
using ElementQ = typename Attention::scalar_t;
using ElementK = typename Attention::scalar_t;
using ElementP = typename Attention::accum_t;
using ElementAccumulator = typename Attention::accum_t;
using ElementV = typename Attention::scalar_t;
using ElementO = typename Attention::output_t;
using ElementCompute = typename Attention::accum_t;
using ElementNorm = typename Attention::accum_t;
using ElementSum = typename Attention::accum_t;
using ElementSoftmaxCompute = typename Attention::accum_t;
using LayoutQ = cutlass::layout::RowMajor;
using LayoutK = cutlass::layout::ColumnMajor;
using LayoutP = cutlass::layout::RowMajor;
using LayoutV = cutlass::layout::RowMajor;
using LayoutO = cutlass::layout::RowMajor;
using MatrixCoord = typename LayoutP::TensorCoord;
private:
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_Q;
cutlass::Distribution::Kind init_K;
cutlass::Distribution::Kind init_P;
cutlass::Distribution::Kind init_V;
cutlass::Distribution::Kind init_O;
uint32_t seed;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device1;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0_real;
std::vector<int64_t> offset_Q;
std::vector<int64_t> offset_K;
std::vector<int64_t> offset_P;
std::vector<int64_t> offset_V;
std::vector<int64_t> offset_O;
std::vector<int64_t> ldq_host;
std::vector<int64_t> ldk_host;
std::vector<int64_t> ldp_host;
std::vector<int64_t> ldv_host;
std::vector<int64_t> ldo_host;
std::vector<int64_t> seqlen_host;
cutlass::DeviceAllocation<int64_t> ldq;
cutlass::DeviceAllocation<int64_t> ldk;
cutlass::DeviceAllocation<int64_t> ldp;
cutlass::DeviceAllocation<int64_t> ldv;
cutlass::DeviceAllocation<int64_t> ldo;
cutlass::DeviceAllocation<int64_t> seqlen;
cutlass::DeviceAllocation<ElementQ> block_Q;
cutlass::DeviceAllocation<ElementK> block_K;
cutlass::DeviceAllocation<ElementP> block_P;
cutlass::DeviceAllocation<ElementV> block_V;
cutlass::DeviceAllocation<ElementO> block_O;
cutlass::DeviceAllocation<ElementNorm> block_Norm;
cutlass::DeviceAllocation<ElementSum> block_Sum;
cutlass::DeviceAllocation<int64_t> offset_P_Device;
cutlass::DeviceAllocation<ElementQ *> ptr_Q;
cutlass::DeviceAllocation<ElementK *> ptr_K;
cutlass::DeviceAllocation<ElementP *> ptr_P;
cutlass::DeviceAllocation<ElementV *> ptr_V;
cutlass::DeviceAllocation<ElementO *> ptr_O;
public:
//
// Methods
//
TestbedAttention(
Options &options_,
cutlass::Distribution::Kind init_Q_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_K_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_P_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_V_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_O_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_Q(init_Q_), init_K(init_K_), init_P(init_P_), init_V(init_V_), init_O(init_O_), seed(seed_) { }
int problem_count() const {
return (options.head_number * options.batch_size);
}
private:
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor_(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<ElementP>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 8;
scope_min = -8;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Initializes data structures
void initialize_() {
//
// Set scalars for the mha example
//
options.alpha0 = 1.0f / sqrt(float(options.head_size));
options.alpha1 = 1.0f;
options.beta = 0;
//
// Choose random problem sizes
//
// construct a few problems of random sizes
srand(seed);
int64_t total_elements_Q = 0;
int64_t total_elements_K = 0;
int64_t total_elements_P = 0;
int64_t total_elements_V = 0;
int64_t total_elements_O = 0;
ldq_host.resize(problem_count());
ldk_host.resize(problem_count());
ldp_host.resize(problem_count());
ldv_host.resize(problem_count());
ldo_host.resize(problem_count());
seqlen_host.resize(problem_count());
// Create tensors in BMHK format, where
// B = batch_size
// M = sequence length
// H = num_heads
// K = embedding size per head
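// Example (illustrative, relying on the equal per-batch sequence lengths used here):
// element (b, m, h, k) of Q ends up at flat offset
// ((b * seq_length + m) * head_number + h) * head_size + k.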
int64_t batch_offset_Q, batch_offset_K, batch_offset_V, batch_offset_O;
for (int32_t b = 0; b < options.batch_size; ++b) {
batch_offset_Q = total_elements_Q;
batch_offset_K = total_elements_K;
batch_offset_V = total_elements_V;
batch_offset_O = total_elements_O;
for (int32_t h = 0; h < options.head_number; ++h) {
int32_t i = h + b * options.head_number;
auto problem0 = options.problem_sizes0.at(i);
auto problem1 = options.problem_sizes1.at(i);
ldq_host.at(i) = LayoutQ::packed({problem0.m(), options.head_number * problem0.k()}).stride(0);
ldk_host.at(i) = LayoutK::packed({options.head_number * problem0.k(), problem0.n()}).stride(0);
ldp_host.at(i) = LayoutP::packed({problem0.m(), problem0.n()}).stride(0);
ldv_host.at(i) = LayoutV::packed({problem1.k(), options.head_number * problem1.n()}).stride(0);
ldo_host.at(i) = LayoutO::packed({problem1.m(), options.head_number * problem1.n()}).stride(0);
// m = n for attention problems.
seqlen_host.at(i) = problem0.m();
offset_Q.push_back(batch_offset_Q + h * problem0.k());
offset_K.push_back(batch_offset_K + h * problem0.k());
offset_P.push_back(total_elements_P);
offset_V.push_back(batch_offset_V + h * problem0.k());
offset_O.push_back(batch_offset_O + h * problem1.n());
int64_t elements_Q = problem0.m() * problem0.k();
int64_t elements_K = problem0.k() * problem0.n();
int64_t elements_P = problem0.m() * problem0.n();
int64_t elements_V = problem1.k() * problem1.n();
int64_t elements_O = problem1.m() * problem1.n();
total_elements_Q += elements_Q;
total_elements_K += elements_K;
total_elements_P += elements_P;
total_elements_V += elements_V;
total_elements_O += elements_O;
}
}
problem_sizes_device0.reset(problem_count());
problem_sizes_device1.reset(problem_count());
problem_sizes_device0.copy_from_host(options.problem_sizes0.data());
problem_sizes_device1.copy_from_host(options.problem_sizes1.data());
if (options.use_mask) {
problem_sizes_device0_real.reset(problem_count());
problem_sizes_device0_real.copy_from_host(options.problem_sizes0_real.data());
}
ldq.reset(problem_count());
ldk.reset(problem_count());
ldp.reset(problem_count());
ldv.reset(problem_count());
ldo.reset(problem_count());
seqlen.reset(problem_count());
ldq.copy_from_host(ldq_host.data());
ldk.copy_from_host(ldk_host.data());
ldp.copy_from_host(ldp_host.data());
ldv.copy_from_host(ldv_host.data());
ldo.copy_from_host(ldo_host.data());
seqlen.copy_from_host(seqlen_host.data());
//
// Assign pointers
//
block_Q.reset(total_elements_Q);
block_K.reset(total_elements_K);
block_P.reset(total_elements_P);
block_V.reset(total_elements_V);
block_O.reset(total_elements_O);
offset_P_Device.reset(problem_count());
// sync offset with device
cutlass::device_memory::copy_to_device(offset_P_Device.get(), offset_P.data(), offset_P.size());
std::vector<ElementQ *> ptr_Q_host(problem_count());
std::vector<ElementK *> ptr_K_host(problem_count());
std::vector<ElementP *> ptr_P_host(problem_count());
std::vector<ElementV *> ptr_V_host(problem_count());
std::vector<ElementO *> ptr_O_host(problem_count());
std::vector<ElementNorm *> ptr_norm_host(problem_count());
std::vector<ElementSum *> ptr_sum_host(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
ptr_Q_host.at(i) = block_Q.get() + offset_Q.at(i);
ptr_K_host.at(i) = block_K.get() + offset_K.at(i);
ptr_P_host.at(i) = block_P.get() + offset_P.at(i);
ptr_V_host.at(i) = block_V.get() + offset_V.at(i);
ptr_O_host.at(i) = block_O.get() + offset_O.at(i);
}
ptr_Q.reset(problem_count());
ptr_Q.copy_from_host(ptr_Q_host.data());
ptr_K.reset(problem_count());
ptr_K.copy_from_host(ptr_K_host.data());
ptr_P.reset(problem_count());
ptr_P.copy_from_host(ptr_P_host.data());
ptr_V.reset(problem_count());
ptr_V.copy_from_host(ptr_V_host.data());
ptr_O.reset(problem_count());
ptr_O.copy_from_host(ptr_O_host.data());
//
// Initialize the problems of the workspace
//
initialize_tensor_(block_Q.get(), total_elements_Q, init_Q, seed + 1);
initialize_tensor_(block_K.get(), total_elements_K, init_K, seed + 2);
initialize_tensor_(block_V.get(), total_elements_V, init_V, seed + 3);
}
template<typename Element>
bool verify_tensor_(std::vector<Element> vector_Input,
std::vector<Element> vector_Input_Ref,
int64_t verify_length = -1) {
int64_t size = (vector_Input.size() < vector_Input_Ref.size()) ? vector_Input.size() : vector_Input_Ref.size();
size = (verify_length == -1) ? size : verify_length;
// 0.05 for absolute error
float abs_tol = 5e-2f;
// 10% for relative error
float rel_tol = 1e-1f;
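// Worked examples of this acceptance rule (an element passes if either tolerance is met):
// computed=1.04 vs ref=1.00 passes (abs diff 0.04 <= 0.05);
// computed=100.5 vs ref=100.0 passes (rel diff 0.5% <= 10%);
// computed=1.20 vs ref=1.00 fails (abs diff 0.20 > 0.05 and rel diff ~20% > 10%).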
for (int64_t i = 0; i < size; ++i) {
float diff = (float)(vector_Input.at(i) - vector_Input_Ref.at(i));
float abs_diff = fabs(diff);
float abs_ref = fabs((float)vector_Input_Ref.at(i) + 1e-5f);
float relative_diff = abs_diff / abs_ref;
if ( (isnan(vector_Input_Ref.at(i)) || isnan(abs_diff) || isinf(abs_diff)) || (abs_diff > abs_tol && relative_diff > rel_tol)) {
printf("[%d/%d] diff = %f, rel_diff = %f, {computed=%f, ref=%f}.\n", int(i), int(size), abs_diff, relative_diff, (float)(vector_Input.at(i)), (float)(vector_Input_Ref.at(i)));
return false;
}
}
return true;
}
/// Verifies the result against a host-side reference computation
bool verify_() {
bool passed = true;
for (int32_t b = 0; b < options.batch_size; ++b) {
int32_t i = b * options.head_number;
// Problem size is the same for all heads
cutlass::gemm::GemmCoord problem0 = options.problem_sizes0.at(b * options.head_number);
cutlass::gemm::GemmCoord problem1 = options.problem_sizes1.at(b * options.head_number);
MatrixCoord extent_Q{problem0.m(), problem0.k()};
MatrixCoord extent_K{problem0.k(), problem0.n()};
MatrixCoord extent_P{problem0.m(), problem0.n()};
MatrixCoord extent_V{problem1.k(), problem1.n()};
MatrixCoord extent_O{problem1.m(), problem1.n()};
LayoutO layout_O(ldo_host.at(i));
std::vector<ElementO> matrix_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_O.data(), block_O.get() + offset_O.at(i), matrix_O.size());
cutlass::DeviceAllocation<ElementO> block_Ref_O(layout_O.capacity(extent_O));
for (int32_t h = 0; h < options.head_number; ++h) {
i = h + b * options.head_number;
LayoutQ layout_Q(ldq_host.at(i));
LayoutK layout_K(ldk_host.at(i));
LayoutP layout_P(ldp_host.at(i));
LayoutV layout_V(ldv_host.at(i));
cutlass::TensorView<ElementQ, LayoutQ> view_Q(block_Q.get() + offset_Q.at(i), layout_Q, extent_Q);
cutlass::TensorView<ElementK, LayoutK> view_K(block_K.get() + offset_K.at(i), layout_K, extent_K);
cutlass::TensorView<ElementV, LayoutV> view_V(block_V.get() + offset_V.at(i), layout_V, extent_V);
cutlass::TensorView<ElementO, LayoutO> view_Ref_O_device(block_Ref_O.get() + offset_O.at(i) - offset_O.at(b * options.head_number), layout_O, extent_O);
cutlass::DeviceAllocation<ElementP> block_Ref_P(layout_P.capacity(extent_P));
cutlass::TensorView<ElementP, LayoutP> view_Ref_P_device(block_Ref_P.get(), layout_P, extent_P);
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementQ, LayoutQ,
ElementK, LayoutK,
ElementP, LayoutP,
ElementCompute, ElementAccumulator
>(
problem0,
ElementAccumulator(options.alpha0),
view_Q,
Attention::MM0::Mma::kTransformA,
view_K,
Attention::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_Ref_P_device,
view_Ref_P_device,
ElementAccumulator(0)
);
// Compute softmax for P. We need to explicitly compute softmax
// over P because softmax is fused to the second GEMM in the
// profiled implementation.
std::vector<ElementP> matrix_Ref(layout_P.capacity(extent_P));
cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref_P.get(), matrix_Ref.size());
cutlass::TensorView<ElementP, LayoutP> view_Ref_host(matrix_Ref.data(), layout_P, extent_P);
std::vector<ElementNorm> vector_Norm_Ref(problem0.m());
std::vector<ElementSum> vector_Sum_Ref(problem0.m());
int n_dim = options.use_mask ? options.problem_sizes0_real.at(i).n() : problem0.n();
// Compute softmax for reference matrix
for (int m = 0; m < problem0.m(); m++) {
int n_dim_row = n_dim;
if (options.causal) {
n_dim_row = ::min(m + 1, n_dim);
}
ElementSoftmaxCompute max = ElementSoftmaxCompute(view_Ref_host.ref().at({m, 0}));
for (int n = 1; n < n_dim_row; n++) {
max = ::max(max, ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})));
}
vector_Norm_Ref.at(m) = ElementNorm(max);
ElementSoftmaxCompute sum = ElementSoftmaxCompute();
for (int n = 0; n < n_dim_row; n++) {
sum += ::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max );
}
ElementSoftmaxCompute inv_sum = ElementSoftmaxCompute(1.0f / sum);
vector_Sum_Ref.at(m) = ElementSum(inv_sum);
for (int n = 0; n < n_dim_row; n++) {
view_Ref_host.ref().at({m, n}) = ElementP(
::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ) * inv_sum
);
}
// Mask out the rest of the attention matrix
for (int n = n_dim_row; n < n_dim; ++n) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
// when not using mask, problem_real and problem share the same sizes
if (options.use_mask) {
for (int m = 0; m < problem0.m(); m++) {
for (int n = n_dim; n < problem0.n(); n++) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
}
cutlass::device_memory::copy_to_device(block_Ref_P.get(), matrix_Ref.data(), matrix_Ref.size());
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementP, LayoutP,
ElementV, LayoutV,
ElementO, LayoutO,
ElementCompute, ElementAccumulator
>(
problem1,
ElementAccumulator(options.alpha1),
view_Ref_P_device,
Attention::MM0::Mma::kTransformA,
view_V,
Attention::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_Ref_O_device,
view_Ref_O_device,
ElementAccumulator(0)
);
}
// Copy to host memory
std::vector<ElementO> matrix_Ref_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_Ref_O.data(), block_Ref_O.get(), matrix_Ref_O.size());
// printf("Pb %d: \n Q=(offset=%d, ldq=%d)\n K=(offset=%d, ldk=%d)\n O=(offset=%d, ldo=%d)\n",
// int(i), int(offset_Q[i]), int(ldq_host[i]), int(offset_K[i]), int(ldk_host[i]), int(offset_O[i]), int(ldo_host[i]));
bool verified_O = false;
if (!verified_O) {
verified_O = verify_tensor_<ElementO>(matrix_O, matrix_Ref_O);
}
passed = passed && verified_O;
if (!passed) {
std::cerr << "\n***\nError - problem " << i << " (batch " << b << ") failed the QA check\n***\n" << std::endl;
if (!verified_O) {
std::cout << "Final matrix output is incorrect" << std::endl;
}
return passed;
}
}
return passed;
}
public:
/// Executes a CUTLASS Attention kernel and measures runtime.
Result profile() {
Result result;
result.passed = false;
// Initialize the problem
initialize_();
typename Attention::Params p;
{ // set parameters
p.query_ptr = block_Q.get();
p.key_ptr = block_K.get();
p.value_ptr = block_V.get();
p.logsumexp_ptr = nullptr; // Only needed for the backward pass
p.output_accum_ptr = nullptr;
if (Attention::kNeedsOutputAccumulatorBuffer) {
hipMalloc(&p.output_accum_ptr, block_O.size() * sizeof(typename Attention::output_accum_t));
}
p.output_ptr = block_O.get();
// TODO: support arbitrary seq lengths
// if (cu_seqlens_q.has_value()) {
// p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
// p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
// }
p.scale = options.alpha0;
p.num_heads = options.head_number;
p.num_batches = options.batch_size;
p.head_dim = options.head_size;
p.head_dim_value = options.head_size_v;
p.num_queries = options.seq_length;
p.num_keys = options.seq_length_kv;
if (options.causal) {
p.custom_mask_type = Attention::CausalFromTopLeft;
}
// All tensors are in BMHK shapes
p.q_strideH = options.head_size;
p.k_strideH = options.head_size;
p.v_strideH = options.head_size_v;
p.q_strideM = int32_t(ldq_host[0]);
p.k_strideM = int32_t(ldk_host[0]);
p.v_strideM = int32_t(ldv_host[0]);
p.q_strideB = p.q_strideM * options.seq_length;
p.k_strideB = p.k_strideM * options.seq_length_kv;
p.v_strideB = p.v_strideM * options.seq_length_kv;
p.o_strideM = p.head_dim_value * p.num_heads;
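// For example, with the default sizes (head_number = 12, head_size = head_size_v = 64,
// seq_length = 1024) and BMHK packing: q_strideH = 64, q_strideM = 12 * 64 = 768,
// q_strideB = 768 * 1024, and o_strideM = 64 * 12 = 768.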
}
// launch kernel :)
constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>;
int smem_bytes = sizeof(typename Attention::SharedStorage);
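// Dynamic shared memory above the default 48 KB (0xc000 bytes) has to be opted into
// explicitly for this kernel before it can be launched with that much smem.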
if (smem_bytes > 0xc000) {
hipFuncSetAttribute(kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
}
if (!Attention::check_supported(p)) {
std::cerr << "Kernel does not support these inputs" << std::endl;
return result;
}
hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, 0, p);
// Wait for completion
result.error = hipDeviceSynchronize();
if (result.error != hipSuccess) {
std::cerr << "Kernel execution error: " << hipGetErrorString(result.error);
return result;
}
//
// Verify correctness
//
result.passed = true;
if (options.reference_check) {
result.passed = verify_();
}
//
// Warm-up run
//
hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, 0, p);
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Attention kernel." << std::endl;
return result;
}
//
// Construct events
//
hipEvent_t events[2];
for (auto & event : events) {
result.error = hipEventCreate(&event);
if (result.error != hipSuccess) {
std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of GEMM operations
result.error = hipEventRecord(events[0]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, 0, p);
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = hipEventRecord(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = hipEventSynchronize(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != hipSuccess) {
std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)hipEventDestroy(event);
}
std::cout << std::endl;
std::cout << "CUTLASS Attention:\n"
<< "====================================================" << std::endl;
std::cout << " " << " {seq length Q, seq length KV, head size, head size V, head number, batch size} = {" << options.seq_length \
<< ", " << options.seq_length_kv << ", " << options.head_size << ", " << options.head_size_v << ", " << options.head_number\
<< ", " << options.batch_size << "}." << std::endl;
std::cout << std::endl;
std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
int kQueriesPerBlock,
int kKeysPerBlock,
bool kSingleValueIteration
>
int run_attention(Options& options) {
using Attention = AttentionKernel<
cutlass::half_t, // scalar_t
cutlass::arch::Sm80, // ArchTag
true, // Memory is aligned
kQueriesPerBlock,
kKeysPerBlock,
kSingleValueIteration,
false, // Supports dropout
false // Supports bias
>;
//
// Test and profile
//
TestbedAttention<Attention> testbed(options);
Result result = testbed.profile();
if (!result.passed) {
std::cout << "Profiling CUTLASS attention has failed.\n";
std::cout << "\nFailed\n";
return -1;
}
std::cout << "\nPassed\n";
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's CUTLASS Attention example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
if (options.use_mask) {
std::cerr << "--use_mask is not supported at the moment\n";
return -2;
}
if (options.alignment != 1) {
std::cerr << "--alignment=1 is the only supported value\n";
return -2;
}
// Determine kernel configuration based on the value head size (head_size_v).
// If head_size_v is less than or equal to 64, each block operates over 64 queries and
// 64 keys, and partial results can be stored in the register file.
// If head_size_v is greater than 64, each block operates over 32 queries and 128 keys,
// and partial results are stored in shared memory.
if (options.head_size_v > 64) {
static int const kQueriesPerBlock = 32;
static int const kKeysPerBlock = 128;
if (options.head_size_v <= kKeysPerBlock) {
return run_attention<kQueriesPerBlock, kKeysPerBlock, true>(options);
} else {
return run_attention<kQueriesPerBlock, kKeysPerBlock, false>(options);
}
} else {
static int const kQueriesPerBlock = 64;
static int const kKeysPerBlock = 64;
return run_attention<kQueriesPerBlock, kKeysPerBlock, true>(options);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
ff6d14d1d36ec8260ad2e2dd7a8612bc79dc69f2.cu
|
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Attention Example.
This workload computes a fused multi head attention.
Because it keeps the attention matrix in shared memory, it's both faster and
uses less global memory.
This is based on `"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_,
and very similar to `"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness" <https://arxiv.org/abs/2205.14135>`_.
Algorithm:
In short, we can compute the output incrementally in blocks of size B,
we just need to divide the final result by the sum of all coefficients in
the softmax (which we compute incrementally) with the following pseudo-code:
```
s_prime = torch.zeros([num_queries])
O = torch.zeros([num_queries, head_size_v])
for i in range(0, K.shape[0], B):
    si = exp((Q . K[i * B:(i+1) * B].t) * scale)
    s_prime += si.sum(-1)
    O += si . V[i * B:(i+1) * B]
O = O / s_prime[:, None]
```
In practice, and for numerical stability reasons,
we also subtract the maximum so far (`mi`) before doing
the exponential. When we encounter new keys, the maximum
used to compute O so far (`m_prime`) can differ from the
current maximum, so we update O before accumulating with
```
O = O * exp(m_prime - mi)
m_prime = mi
```
Implementation details:
- `si` is stored in shared memory between the 2 back to back gemms
- we keep and accumulate the output
directly in registers if we can (`head_size_v <= 128`).
Otherwise, we store it & accumulate in global memory (slower)
- blocks are parallelized across the batch dimension, the number
of heads, and the query sequence size
Examples:
# Run an attention example with default setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen
# Run an attention example with custom setup
$ ./examples/41_fused_multi_head_attention/41_fused_multi_head_attention_fixed_seqlen --head_number=2 --batch_size=3 --head_size=32 --head_size_v=64 --seq_length=512 --seq_length_kv=1024 --causal=true
Acknowledgement: Fixed-sequence-length FMHA code was upstreamed by Meta xFormers (https://github.com/facebookresearch/xformers).
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/kernel/gemm_grouped.h"
#include "cutlass/gemm/kernel/gemm_transpose_operands.h"
#include "cutlass/gemm/kernel/default_gemm.h"
#include "cutlass/gemm/kernel/default_gemm_complex.h"
#include "cutlass/gemm/device/default_gemm_configuration.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/threadblock/epilogue_with_visitor.h"
#include "cutlass/fast_math.h"
#include "kernel_forward.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
bool error;
bool reference_check;
bool use_mask;
bool causal;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1;
std::vector<cutlass::gemm::GemmCoord> problem_sizes0_real;
std::vector<cutlass::gemm::GemmCoord> problem_sizes1_real;
int alignment;
int head_number;
int batch_size;
int head_size;
int head_size_v;
int seq_length;
int seq_length_kv;
int iterations;
// alpha0, alpha1 and beta are fixed
// in this multi-head attention example
float alpha0;
float alpha1;
float beta;
//
// Methods
//
Options():
help(false),
error(false),
alignment(1),
reference_check(true),
head_number(12),
batch_size(16),
head_size(64),
head_size_v(64),
seq_length(1024),
seq_length_kv(1024),
use_mask(false),
iterations(20),
causal(false)
{ }
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
return;
}
cmd.get_cmd_line_argument("alignment", alignment, 1);
cmd.get_cmd_line_argument("head_number", head_number, 12);
cmd.get_cmd_line_argument("batch_size", batch_size, 16);
cmd.get_cmd_line_argument("head_size", head_size, 64);
cmd.get_cmd_line_argument("head_size_v", head_size_v, head_size);
cmd.get_cmd_line_argument("seq_length", seq_length, 1024);
cmd.get_cmd_line_argument("seq_length_kv", seq_length_kv, seq_length);
cmd.get_cmd_line_argument("use_mask", use_mask, false);
cmd.get_cmd_line_argument("iterations", iterations, 20);
cmd.get_cmd_line_argument("reference-check", reference_check, true);
cmd.get_cmd_line_argument("causal", causal, true);
randomize_problems();
}
void randomize_problems() {
int problem_count = head_number * batch_size;
problem_sizes0.reserve(problem_count);
problem_sizes1.reserve(problem_count);
// When using mask, the original inputs are not padded
// and we need to save this information.
if (use_mask) {
problem_sizes0_real.reserve(problem_count);
problem_sizes1_real.reserve(problem_count);
}
for (int i = 0; i < batch_size; ++i) {
// problems belonging to the same batch share the same seq len
int m_real = seq_length;
int mkv_real = seq_length_kv;
int m = (m_real + alignment - 1) / alignment * alignment;
int mkv = (mkv_real + alignment - 1) / alignment * alignment;
int k0 = head_size;
int k1 = head_size_v;
for (int j = 0; j < head_number; ++j) {
cutlass::gemm::GemmCoord problem0(m, mkv, k0);
cutlass::gemm::GemmCoord problem1(m, k1, mkv);
problem_sizes0.push_back(problem0);
problem_sizes1.push_back(problem1);
if (use_mask) {
cutlass::gemm::GemmCoord problem0_real(m_real, mkv_real, k0);
cutlass::gemm::GemmCoord problem1_real(m_real, k1, mkv_real);
problem_sizes0_real.push_back(problem0_real);
problem_sizes1_real.push_back(problem1_real);
}
}
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "41_fused_multi_head_attention_fixed_seqlen\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --head_number=<int> Head number in multi-head attention (default: --head_number=12)\n"
<< " --batch_size=<int> Batch size in multi-head attention (default: --batch_size=16)\n"
<< " --head_size=<int> Head size in multi-head attention (default: --head_size=64)\n"
<< " --head_size_v=<int> Head size in multi-head attention for V (default: --head_size_v=head_size)\n"
<< " --seq_length=<int> Sequence length in multi-head attention for Q (default: --seq_length=1024)\n"
<< " --seq_length_kv=<int> Sequence length in multi-head attention for K/V (default: --seq_length_kv=seq_length)\n"
<< " --use_mask=<bool> If true, performs padding-like masking in softmax.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --reference-check=<bool> If true, performs reference check.\n"
<< " --causal=<bool> If true, uses causal masking.\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued floating-point operations (multiplies and adds counted separately)
int64_t fops = int64_t();
for (int i = 0; i < problem_sizes0.size(); ++i) {
auto const& problem0 = problem_sizes0[i];
auto const& problem1 = problem_sizes1[i];
for (int row = 0; row < problem0.m(); ++row) {
int num_cols0 = problem0.n();
if (causal) {
num_cols0 = std::min(row + 1, num_cols0);
}
// P <- Q . K_t
fops += 2 * num_cols0 * problem0.k();
// P <- exp(P - max(P))
fops += 2 * num_cols0;
// S <- sum(P)
fops += num_cols0 - 1;
// O <- P . V
fops += 2 * num_cols0 * problem1.n();
// O <- O / S
fops += num_cols0 * problem1.n();
}
}
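// (For the non-causal case the loop above sums to m * (2*n*k0 + 3*n*k1 + 3*n - 1)
// operations per problem, with n = problem0.n(), k0 = problem0.k(), k1 = problem1.n().)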
return double(fops) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Attention>
class TestbedAttention {
public:
//
// Type definitions
//
using ElementQ = typename Attention::scalar_t;
using ElementK = typename Attention::scalar_t;
using ElementP = typename Attention::accum_t;
using ElementAccumulator = typename Attention::accum_t;
using ElementV = typename Attention::scalar_t;
using ElementO = typename Attention::output_t;
using ElementCompute = typename Attention::accum_t;
using ElementNorm = typename Attention::accum_t;
using ElementSum = typename Attention::accum_t;
using ElementSoftmaxCompute = typename Attention::accum_t;
using LayoutQ = cutlass::layout::RowMajor;
using LayoutK = cutlass::layout::ColumnMajor;
using LayoutP = cutlass::layout::RowMajor;
using LayoutV = cutlass::layout::RowMajor;
using LayoutO = cutlass::layout::RowMajor;
using MatrixCoord = typename LayoutP::TensorCoord;
private:
//
// Data members
//
Options & options;
/// Initialization
cutlass::Distribution::Kind init_Q;
cutlass::Distribution::Kind init_K;
cutlass::Distribution::Kind init_P;
cutlass::Distribution::Kind init_V;
cutlass::Distribution::Kind init_O;
uint32_t seed;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device1;
cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device0_real;
std::vector<int64_t> offset_Q;
std::vector<int64_t> offset_K;
std::vector<int64_t> offset_P;
std::vector<int64_t> offset_V;
std::vector<int64_t> offset_O;
std::vector<int64_t> ldq_host;
std::vector<int64_t> ldk_host;
std::vector<int64_t> ldp_host;
std::vector<int64_t> ldv_host;
std::vector<int64_t> ldo_host;
std::vector<int64_t> seqlen_host;
cutlass::DeviceAllocation<int64_t> ldq;
cutlass::DeviceAllocation<int64_t> ldk;
cutlass::DeviceAllocation<int64_t> ldp;
cutlass::DeviceAllocation<int64_t> ldv;
cutlass::DeviceAllocation<int64_t> ldo;
cutlass::DeviceAllocation<int64_t> seqlen;
cutlass::DeviceAllocation<ElementQ> block_Q;
cutlass::DeviceAllocation<ElementK> block_K;
cutlass::DeviceAllocation<ElementP> block_P;
cutlass::DeviceAllocation<ElementV> block_V;
cutlass::DeviceAllocation<ElementO> block_O;
cutlass::DeviceAllocation<ElementNorm> block_Norm;
cutlass::DeviceAllocation<ElementSum> block_Sum;
cutlass::DeviceAllocation<int64_t> offset_P_Device;
cutlass::DeviceAllocation<ElementQ *> ptr_Q;
cutlass::DeviceAllocation<ElementK *> ptr_K;
cutlass::DeviceAllocation<ElementP *> ptr_P;
cutlass::DeviceAllocation<ElementV *> ptr_V;
cutlass::DeviceAllocation<ElementO *> ptr_O;
public:
//
// Methods
//
TestbedAttention(
Options &options_,
cutlass::Distribution::Kind init_Q_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_K_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_P_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_V_ = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind init_O_ = cutlass::Distribution::Uniform,
uint32_t seed_ = 3080
):
options(options_), init_Q(init_Q_), init_K(init_K_), init_P(init_P_), init_V(init_V_), init_O(init_O_), seed(seed_) { }
int problem_count() const {
return (options.head_number * options.batch_size);
}
private:
/// Helper to initialize a tensor view
template <typename Element>
void initialize_tensor_(
Element *ptr,
size_t capacity,
cutlass::Distribution::Kind dist_kind,
uint32_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
Element scope_max, scope_min;
int bits_input = cutlass::sizeof_bits<Element>::value;
int bits_output = cutlass::sizeof_bits<ElementP>::value;
if (bits_input == 1) {
scope_max = 2;
scope_min = 0;
} else if (bits_input <= 8) {
scope_max = 2;
scope_min = -2;
} else if (bits_output == 16) {
scope_max = 8;
scope_min = -8;
} else {
scope_max = 8;
scope_min = -8;
}
cutlass::reference::device::BlockFillRandomUniform(
ptr, capacity, seed, scope_max, scope_min, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::device::BlockFillRandomGaussian(
ptr, capacity, seed, Element(), Element(0.5f));
}
else if (dist_kind == cutlass::Distribution::Sequential) {
// Fill with increasing elements
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(1), Element());
}
else {
// Fill with all 1s
cutlass::reference::device::BlockFillSequential(
ptr, capacity, Element(), Element(1));
}
}
/// Initializes data structures
void initialize_() {
//
// Set scalars for the mha example
//
options.alpha0 = 1.0f / sqrt(float(options.head_size));
options.alpha1 = 1.0f;
options.beta = 0;
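// e.g. the default head_size = 64 gives alpha0 = 1/sqrt(64) = 0.125, the usual
// 1/sqrt(d_k) softmax scaling; the second GEMM and the epilogue are left unscaled.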
//
// Choose random problem sizes
//
// construct a few problems of random sizes
srand(seed);
int64_t total_elements_Q = 0;
int64_t total_elements_K = 0;
int64_t total_elements_P = 0;
int64_t total_elements_V = 0;
int64_t total_elements_O = 0;
ldq_host.resize(problem_count());
ldk_host.resize(problem_count());
ldp_host.resize(problem_count());
ldv_host.resize(problem_count());
ldo_host.resize(problem_count());
seqlen_host.resize(problem_count());
// Create tensors in BMHK format, where
// B = batch_size
// M = sequence length
// H = num_heads
// K = embedding size per head
int64_t batch_offset_Q, batch_offset_K, batch_offset_V, batch_offset_O;
for (int32_t b = 0; b < options.batch_size; ++b) {
batch_offset_Q = total_elements_Q;
batch_offset_K = total_elements_K;
batch_offset_V = total_elements_V;
batch_offset_O = total_elements_O;
for (int32_t h = 0; h < options.head_number; ++h) {
int32_t i = h + b * options.head_number;
auto problem0 = options.problem_sizes0.at(i);
auto problem1 = options.problem_sizes1.at(i);
ldq_host.at(i) = LayoutQ::packed({problem0.m(), options.head_number * problem0.k()}).stride(0);
ldk_host.at(i) = LayoutK::packed({options.head_number * problem0.k(), problem0.n()}).stride(0);
ldp_host.at(i) = LayoutP::packed({problem0.m(), problem0.n()}).stride(0);
ldv_host.at(i) = LayoutV::packed({problem1.k(), options.head_number * problem1.n()}).stride(0);
ldo_host.at(i) = LayoutO::packed({problem1.m(), options.head_number * problem1.n()}).stride(0);
// m = n for attention problems.
seqlen_host.at(i) = problem0.m();
offset_Q.push_back(batch_offset_Q + h * problem0.k());
offset_K.push_back(batch_offset_K + h * problem0.k());
offset_P.push_back(total_elements_P);
offset_V.push_back(batch_offset_V + h * problem0.k());
offset_O.push_back(batch_offset_O + h * problem1.n());
int64_t elements_Q = problem0.m() * problem0.k();
int64_t elements_K = problem0.k() * problem0.n();
int64_t elements_P = problem0.m() * problem0.n();
int64_t elements_V = problem1.k() * problem1.n();
int64_t elements_O = problem1.m() * problem1.n();
total_elements_Q += elements_Q;
total_elements_K += elements_K;
total_elements_P += elements_P;
total_elements_V += elements_V;
total_elements_O += elements_O;
}
}
problem_sizes_device0.reset(problem_count());
problem_sizes_device1.reset(problem_count());
problem_sizes_device0.copy_from_host(options.problem_sizes0.data());
problem_sizes_device1.copy_from_host(options.problem_sizes1.data());
if (options.use_mask) {
problem_sizes_device0_real.reset(problem_count());
problem_sizes_device0_real.copy_from_host(options.problem_sizes0_real.data());
}
ldq.reset(problem_count());
ldk.reset(problem_count());
ldp.reset(problem_count());
ldv.reset(problem_count());
ldo.reset(problem_count());
seqlen.reset(problem_count());
ldq.copy_from_host(ldq_host.data());
ldk.copy_from_host(ldk_host.data());
ldp.copy_from_host(ldp_host.data());
ldv.copy_from_host(ldv_host.data());
ldo.copy_from_host(ldo_host.data());
seqlen.copy_from_host(seqlen_host.data());
//
// Assign pointers
//
block_Q.reset(total_elements_Q);
block_K.reset(total_elements_K);
block_P.reset(total_elements_P);
block_V.reset(total_elements_V);
block_O.reset(total_elements_O);
offset_P_Device.reset(problem_count());
// sync offset with device
cutlass::device_memory::copy_to_device(offset_P_Device.get(), offset_P.data(), offset_P.size());
std::vector<ElementQ *> ptr_Q_host(problem_count());
std::vector<ElementK *> ptr_K_host(problem_count());
std::vector<ElementP *> ptr_P_host(problem_count());
std::vector<ElementV *> ptr_V_host(problem_count());
std::vector<ElementO *> ptr_O_host(problem_count());
std::vector<ElementNorm *> ptr_norm_host(problem_count());
std::vector<ElementSum *> ptr_sum_host(problem_count());
for (int32_t i = 0; i < problem_count(); ++i) {
ptr_Q_host.at(i) = block_Q.get() + offset_Q.at(i);
ptr_K_host.at(i) = block_K.get() + offset_K.at(i);
ptr_P_host.at(i) = block_P.get() + offset_P.at(i);
ptr_V_host.at(i) = block_V.get() + offset_V.at(i);
ptr_O_host.at(i) = block_O.get() + offset_O.at(i);
}
ptr_Q.reset(problem_count());
ptr_Q.copy_from_host(ptr_Q_host.data());
ptr_K.reset(problem_count());
ptr_K.copy_from_host(ptr_K_host.data());
ptr_P.reset(problem_count());
ptr_P.copy_from_host(ptr_P_host.data());
ptr_V.reset(problem_count());
ptr_V.copy_from_host(ptr_V_host.data());
ptr_O.reset(problem_count());
ptr_O.copy_from_host(ptr_O_host.data());
//
// Initialize the problems of the workspace
//
initialize_tensor_(block_Q.get(), total_elements_Q, init_Q, seed + 1);
initialize_tensor_(block_K.get(), total_elements_K, init_K, seed + 2);
initialize_tensor_(block_V.get(), total_elements_V, init_V, seed + 3);
}
template<typename Element>
bool verify_tensor_(std::vector<Element> vector_Input,
std::vector<Element> vector_Input_Ref,
int64_t verify_length = -1) {
int64_t size = (vector_Input.size() < vector_Input_Ref.size()) ? vector_Input.size() : vector_Input_Ref.size();
size = (verify_length == -1) ? size : verify_length;
// 0.05 for absolute error
float abs_tol = 5e-2f;
// 10% for relative error
float rel_tol = 1e-1f;
for (int64_t i = 0; i < size; ++i) {
float diff = (float)(vector_Input.at(i) - vector_Input_Ref.at(i));
float abs_diff = fabs(diff);
float abs_ref = fabs((float)vector_Input_Ref.at(i) + 1e-5f);
float relative_diff = abs_diff / abs_ref;
if ( (isnan(vector_Input_Ref.at(i)) || isnan(abs_diff) || isinf(abs_diff)) || (abs_diff > abs_tol && relative_diff > rel_tol)) {
printf("[%d/%d] diff = %f, rel_diff = %f, {computed=%f, ref=%f}.\n", int(i), int(size), abs_diff, relative_diff, (float)(vector_Input.at(i)), (float)(vector_Input_Ref.at(i)));
return false;
}
}
return true;
}
/// Verifies the result against a host-side reference computation
bool verify_() {
bool passed = true;
for (int32_t b = 0; b < options.batch_size; ++b) {
int32_t i = b * options.head_number;
// Problem size is the same for all heads
cutlass::gemm::GemmCoord problem0 = options.problem_sizes0.at(b * options.head_number);
cutlass::gemm::GemmCoord problem1 = options.problem_sizes1.at(b * options.head_number);
MatrixCoord extent_Q{problem0.m(), problem0.k()};
MatrixCoord extent_K{problem0.k(), problem0.n()};
MatrixCoord extent_P{problem0.m(), problem0.n()};
MatrixCoord extent_V{problem1.k(), problem1.n()};
MatrixCoord extent_O{problem1.m(), problem1.n()};
LayoutO layout_O(ldo_host.at(i));
std::vector<ElementO> matrix_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_O.data(), block_O.get() + offset_O.at(i), matrix_O.size());
cutlass::DeviceAllocation<ElementO> block_Ref_O(layout_O.capacity(extent_O));
for (int32_t h = 0; h < options.head_number; ++h) {
i = h + b * options.head_number;
LayoutQ layout_Q(ldq_host.at(i));
LayoutK layout_K(ldk_host.at(i));
LayoutP layout_P(ldp_host.at(i));
LayoutV layout_V(ldv_host.at(i));
cutlass::TensorView<ElementQ, LayoutQ> view_Q(block_Q.get() + offset_Q.at(i), layout_Q, extent_Q);
cutlass::TensorView<ElementK, LayoutK> view_K(block_K.get() + offset_K.at(i), layout_K, extent_K);
cutlass::TensorView<ElementV, LayoutV> view_V(block_V.get() + offset_V.at(i), layout_V, extent_V);
cutlass::TensorView<ElementO, LayoutO> view_Ref_O_device(block_Ref_O.get() + offset_O.at(i) - offset_O.at(b * options.head_number), layout_O, extent_O);
cutlass::DeviceAllocation<ElementP> block_Ref_P(layout_P.capacity(extent_P));
cutlass::TensorView<ElementP, LayoutP> view_Ref_P_device(block_Ref_P.get(), layout_P, extent_P);
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementQ, LayoutQ,
ElementK, LayoutK,
ElementP, LayoutP,
ElementCompute, ElementAccumulator
>(
problem0,
ElementAccumulator(options.alpha0),
view_Q,
Attention::MM0::Mma::kTransformA,
view_K,
Attention::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_Ref_P_device,
view_Ref_P_device,
ElementAccumulator(0)
);
// Compute softmax for P. We need to explicitly compute softmax
// over P because softmax is fused to the second GEMM in the
// profiled implementation.
std::vector<ElementP> matrix_Ref(layout_P.capacity(extent_P));
cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref_P.get(), matrix_Ref.size());
cutlass::TensorView<ElementP, LayoutP> view_Ref_host(matrix_Ref.data(), layout_P, extent_P);
std::vector<ElementNorm> vector_Norm_Ref(problem0.m());
std::vector<ElementSum> vector_Sum_Ref(problem0.m());
int n_dim = options.use_mask ? options.problem_sizes0_real.at(i).n() : problem0.n();
// Compute softmax for reference matrix
for (int m = 0; m < problem0.m(); m++) {
int n_dim_row = n_dim;
if (options.causal) {
n_dim_row = std::min(m + 1, n_dim);
}
ElementSoftmaxCompute max = ElementSoftmaxCompute(view_Ref_host.ref().at({m, 0}));
for (int n = 1; n < n_dim_row; n++) {
max = std::max(max, ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})));
}
vector_Norm_Ref.at(m) = ElementNorm(max);
ElementSoftmaxCompute sum = ElementSoftmaxCompute();
for (int n = 0; n < n_dim_row; n++) {
sum += std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max );
}
ElementSoftmaxCompute inv_sum = ElementSoftmaxCompute(1.0f / sum);
vector_Sum_Ref.at(m) = ElementSum(inv_sum);
for (int n = 0; n < n_dim_row; n++) {
view_Ref_host.ref().at({m, n}) = ElementP(
std::exp( ElementSoftmaxCompute(view_Ref_host.ref().at({m, n})) - max ) * inv_sum
);
}
// Mask out the rest of the attention matrix
for (int n = n_dim_row; n < n_dim; ++n) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
// when not using mask, problem_real and problem share the same sizes
if (options.use_mask) {
for (int m = 0; m < problem0.m(); m++) {
for (int n = n_dim; n < problem0.n(); n++) {
view_Ref_host.ref().at({m, n}) = ElementP(0);
}
}
}
cutlass::device_memory::copy_to_device(block_Ref_P.get(), matrix_Ref.data(), matrix_Ref.size());
// Reference GEMM
cutlass::reference::device::GemmComplex<
ElementP, LayoutP,
ElementV, LayoutV,
ElementO, LayoutO,
ElementCompute, ElementAccumulator
>(
problem1,
ElementAccumulator(options.alpha1),
view_Ref_P_device,
Attention::MM0::Mma::kTransformA,
view_V,
Attention::MM0::Mma::kTransformB,
ElementAccumulator(options.beta),
view_Ref_O_device,
view_Ref_O_device,
ElementAccumulator(0)
);
}
// Copy to host memory
std::vector<ElementO> matrix_Ref_O(layout_O.capacity(extent_O));
cutlass::device_memory::copy_to_host(matrix_Ref_O.data(), block_Ref_O.get(), matrix_Ref_O.size());
// printf("Pb %d: \n Q=(offset=%d, ldq=%d)\n K=(offset=%d, ldk=%d)\n O=(offset=%d, ldo=%d)\n",
// int(i), int(offset_Q[i]), int(ldq_host[i]), int(offset_K[i]), int(ldk_host[i]), int(offset_O[i]), int(ldo_host[i]));
bool verified_O = false;
if (!verified_O) {
verified_O = verify_tensor_<ElementO>(matrix_O, matrix_Ref_O);
}
passed = passed && verified_O;
if (!passed) {
std::cerr << "\n***\nError - problem " << i << " (batch " << b << ") failed the QA check\n***\n" << std::endl;
if (!verified_O) {
std::cout << "Final matrix output is incorrect" << std::endl;
}
return passed;
}
}
return passed;
}
public:
/// Executes a CUTLASS Attention kernel and measures runtime.
Result profile() {
Result result;
result.passed = false;
// Initialize the problem
initialize_();
typename Attention::Params p;
{ // set parameters
p.query_ptr = block_Q.get();
p.key_ptr = block_K.get();
p.value_ptr = block_V.get();
p.logsumexp_ptr = nullptr; // Only needed for the backward pass
p.output_accum_ptr = nullptr;
if (Attention::kNeedsOutputAccumulatorBuffer) {
cudaMalloc(&p.output_accum_ptr, block_O.size() * sizeof(typename Attention::output_accum_t));
}
p.output_ptr = block_O.get();
// TODO: support arbitrary seq lengths
// if (cu_seqlens_q.has_value()) {
// p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
// p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
// }
p.scale = options.alpha0;
p.num_heads = options.head_number;
p.num_batches = options.batch_size;
p.head_dim = options.head_size;
p.head_dim_value = options.head_size_v;
p.num_queries = options.seq_length;
p.num_keys = options.seq_length_kv;
if (options.causal) {
p.custom_mask_type = Attention::CausalFromTopLeft;
}
// All tensors are in BMHK shapes
p.q_strideH = options.head_size;
p.k_strideH = options.head_size;
p.v_strideH = options.head_size_v;
p.q_strideM = int32_t(ldq_host[0]);
p.k_strideM = int32_t(ldk_host[0]);
p.v_strideM = int32_t(ldv_host[0]);
p.q_strideB = p.q_strideM * options.seq_length;
p.k_strideB = p.k_strideM * options.seq_length_kv;
p.v_strideB = p.v_strideM * options.seq_length_kv;
p.o_strideM = p.head_dim_value * p.num_heads;
}
// launch kernel :)
constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>;
int smem_bytes = sizeof(typename Attention::SharedStorage);
if (smem_bytes > 0xc000) {
cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
}
if (!Attention::check_supported(p)) {
std::cerr << "Kernel does not support these inputs" << std::endl;
return result;
}
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
// Wait for completion
result.error = cudaDeviceSynchronize();
if (result.error != cudaSuccess) {
std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error);
return result;
}
//
// Verify correctness
//
result.passed = true;
if (options.reference_check) {
result.passed = verify_();
}
//
// Warm-up run
//
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "Failed to run CUTLASS Attention kernel." << std::endl;
return result;
}
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of GEMM operations
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes>>>(p);
}
//
// Stop profiling loop
//
// Record an event when the GEMM operations have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
//
// Cleanup
//
for (auto event : events) {
(void)cudaEventDestroy(event);
}
std::cout << std::endl;
std::cout << "CUTLASS Attention:\n"
<< "====================================================" << std::endl;
std::cout << " " << " {seq length Q, seq length KV, head size, head size V, head number, batch size} = {" << options.seq_length \
<< ", " << options.seq_length_kv << ", " << options.head_size << ", " << options.head_size_v << ", " << options.head_number\
<< ", " << options.batch_size << "}." << std::endl;
std::cout << std::endl;
std::cout << " " << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " " << "GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
int kQueriesPerBlock,
int kKeysPerBlock,
bool kSingleValueIteration
>
int run_attention(Options& options) {
using Attention = AttentionKernel<
cutlass::half_t, // scalar_t
cutlass::arch::Sm80, // ArchTag
true, // Memory is aligned
kQueriesPerBlock,
kKeysPerBlock,
kSingleValueIteration,
false, // Supports dropout
false // Supports bias
>;
//
// Test and profile
//
TestbedAttention<Attention> testbed(options);
Result result = testbed.profile();
if (!result.passed) {
std::cout << "Profiling CUTLASS attention has failed.\n";
std::cout << "\nFailed\n";
return -1;
}
std::cout << "\nPassed\n";
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) {
//
// This example requires an NVIDIA Ampere-architecture GPU.
//
std::cout
<< "CUTLASS's CUTLASS Attention example requires a GPU of NVIDIA's Ampere Architecture or "
<< "later (compute capability 80 or greater).\n";
return 0;
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.error) {
std::cerr << "Aborting execution." << std::endl;
return -1;
}
if (options.use_mask) {
std::cerr << "--use_mask is not supported at the moment\n";
return -2;
}
if (options.alignment != 1) {
std::cerr << "--alignment=1 is the only supported value\n";
return -2;
}
// Determine kernel configuration based on the value head size (head_size_v).
// If head_size_v is less than or equal to 64, each block operates over 64 queries and
// 64 keys, and partial results can be stored in the register file.
// If head_size_v is greater than 64, each block operates over 32 queries and 128 keys,
// and partial results are stored in shared memory.
if (options.head_size_v > 64) {
static int const kQueriesPerBlock = 32;
static int const kKeysPerBlock = 128;
if (options.head_size_v <= kKeysPerBlock) {
return run_attention<kQueriesPerBlock, kKeysPerBlock, true>(options);
} else {
return run_attention<kQueriesPerBlock, kKeysPerBlock, false>(options);
}
} else {
static int const kQueriesPerBlock = 64;
static int const kKeysPerBlock = 64;
return run_attention<kQueriesPerBlock, kKeysPerBlock, true>(options);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
aa6dfef7854e7e16374a84381aaf0517b5ec53c4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "Re3000_4"
#define NUMGPU 1
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define BLOCKSIZEINTERP 8
#define XDIM 64
#define YDIM 124
#define ZDIM 13
#define TMAX 3000000
#define STARTF 3000000
#define DYNY1 1200000
#define DYNY2 1
#define KP 0.3f //proportional-control (P) gain
#define OBSTR1 31.f
#define OBSTX1 32.5f
#define OBSTY1 31.5f
#define OBSTZ1 32.5f
#define OBSTR2 5.f
#define OBSTX2 31.5f
#define OBSTY2 35.5f
#define OBSTZ2 32.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 16.25f //minimum x coord of LR
#define XLRDIM 64 //number of nodes in x
#define LRY0 31.25f
#define YLRDIM 80
#define LRZ0 -0.75f
#define ZLRDIM 8
#define ORDER 2 //order of accuracy of interpolation
#define RE 15000.f//2000.f//100.f;
#define UMAX 0.06f
#define SmagLES 1 //1,0
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 0 //1,0
#define CS 0.02f
#define DPDX 0.f
#define DPDY -7.0e-8
#define VELAV 1
#define START_VELAV 200000
#define START_VELFLUC 1600000
inline __device__ int ImageFcnLR(float x, float y, float z)
{
int value = 0;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
{
value = 10;
}
return value;
}
inline __device__ int ImageFcn(int x, int y, int z, int t)
{
int value = 0;
if(abs(x-OBSTX2) < OBSTR2 && abs(y-OBSTY2) < OBSTR2 && t < 5000)
value = 10;
if(abs(x-OBSTX2-3) < OBSTR2 && abs(y-OBSTY2-3) < OBSTR2 && t < 5000 && z == 10)
value = 10;
//if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
if(x == 0)
value = 1;//50;//400;
else if(x == XDIM-1)
value = 1;//51;//300;
// else if(y == 0)
// value = 200; //52;//1;//22;
//// else if(y == DYNY1)
//// value = 54;//1;//22;
// else if(y == YDIM-1)
// value = 100;
//if(z == ZDIM-1) value = 1;
return value;
}
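// Note on PoisProf below: it evaluates the plane-Poiseuille parabola normalized to a
// cross-channel mean of 1; it is zero at the near-wall positions (argument 0.5 and
// 2*radius + 0.5) and peaks at 1.5 at the centerline.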
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
}
inline __device__ float PoisProf3D (float x, float y){
x = x-0.5f;
y = y-0.5f;
//float H = 41.f;
return UMAX;//2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H));
// float radius = (YDIM-1-1)*0.5f;
// float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
// return (result);
}
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
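// Typical usage (illustrative): gettimeofday(&t_start, NULL); ...work...;
// gettimeofday(&t_stop, NULL); timeval_subtract(&seconds, &t_stop, &t_start);
// leaves the elapsed wall-clock time (as a double, in seconds) in `seconds`.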
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmax(int a,int b)
{
if (a>b) return a;
else return b;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
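//standard trilinear interpolation of eight corner values at fractional offsets (x,y,z) in [0,1]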
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
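//linear indexing into the pitched distribution arrays: f_mem/buff_mem address direction f_num at (x,y[,z])
//on the coarse grid (y wraps periodically), f_memLR/buff_memLR on the refined grid, and the *_interp
//variants address the coarse-spacing interpolation buffers; all results are clamped to the valid range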
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
if(y > YDIM-1) y = 0;
if(y < 0) y = YDIM-1;
//if(y == DYNY1+1) y = 0; //YDIM-1;
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner)
{
int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*(YLRDIM*LRFACTOR+1)*(zInner));
return index;
}
inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner)
{
int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch);
index = dmax(index);
index = dmin(index,19*pitch*(YLRDIM*LRFACTOR+1));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
if(y > YDIM-1) y = 0;
if(y < 0) y = YDIM-1;
//if(y == DYNY1+1) y = 0; //YDIM-1;
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
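//adds a constant body force equivalent to the pressure gradient dpdy: every population with a nonzero
//y-velocity component is shifted by its lattice weight times 3*dpdy (x-direction forcing is commented out)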
inline __device__ void AddForce(float* f, float dpdy)
{
// f[1] -= 0.0555555556f*3.f*DPDX;
// f[3] += 0.0555555556f*3.f*DPDX;
// f[5] -= 0.0277777778f*3.f*DPDX;
// f[6] += 0.0277777778f*3.f*DPDX;
// f[7] += 0.0277777778f*3.f*DPDX;
// f[8] -= 0.0277777778f*3.f*DPDX;
// f[10]-= 0.0277777778f*3.f*DPDX;
// f[12]+= 0.0277777778f*3.f*DPDX;
// f[15]-= 0.0277777778f*3.f*DPDX;
// f[17]+= 0.0277777778f*3.f*DPDX;
f[2] -= 0.0555555556f*3.f*dpdy;
f[4] += 0.0555555556f*3.f*dpdy;
f[5] -= 0.0277777778f*3.f*dpdy;
f[6] -= 0.0277777778f*3.f*dpdy;
f[7] += 0.0277777778f*3.f*dpdy;
f[8] += 0.0277777778f*3.f*dpdy;
f[11]-= 0.0277777778f*3.f*dpdy;
f[13]+= 0.0277777778f*3.f*dpdy;
f[16]-= 0.0277777778f*3.f*dpdy;
f[18]+= 0.0277777778f*3.f*dpdy;
}
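//computes the 19 moments of the D3Q19 MRT model from the distributions:
//m[0] = density, m[3]/m[5]/m[7] = x/y/z momentum, m[9]..m[15] = second-order (stress-related) moments,
//the rest are energy-related and higher-order moments. Moments_host below is the identical CPU version.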
inline __device__ void Moments(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void Moments_host(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void InvertMoments_host(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
meq[ 2] = 7.53968254f*(u*u+v*v+w*w);
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[11] = v*v-w*w;
meq[13] = u*v;
meq[14] = v*w;
meq[15] = u*w;
}
inline __device__ void bgk_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
meq[ 2] = 3.f*rho-5.5f*(u*u+v*v+w*w);
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[10] = -0.5f*meq[9]*0.333333333333f;
meq[11] = v*v-w*w;
meq[12] = -0.5f*meq[11];
meq[13] = u*v;
meq[14] = v*w;
meq[15] = u*w;
}
//outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from 19 moments
inline __device__ void StrainRate(float* S, float* m_strain, float dx)
{
float rho = m_strain[0];
float u = m_strain[3];
float v = m_strain[5];
float w = m_strain[7];
float m1 = m_strain[1 ]+11.f*rho-19.f*(u*u+v*v+w*w);
float m9 = m_strain[9 ]-(2.f*u*u-(v*v+w*w));
float m11= m_strain[11]-(v*v-w*w);
float m13= m_strain[13]-(u*v);
float m14= m_strain[14]-(v*w);
float m15= m_strain[15]-(u*w);
S[0] = -0.026315789f*( m1+19.f* m9);
S[1] = -0.013157895f*(2.f*m1-19.f*(m9-3.f*m11));
S[2] = -0.013157895f*(2.f*m1-19.f*(m9+3.f*m11));
S[3] = -1.5f*m13;
S[4] = -1.5f*m14;
S[5] = -1.5f*m15;
}
//outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f
inline __device__ void PhysicalMoments(float* mom, float* f)
{
mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17];
mom[6] = f[5]+-f[6]+f[7]+-f[8];
mom[7] = f[11]+-f[13]+-f[16]+f[18];
mom[8] = f[10]+-f[12]+-f[15]+f[17];
}
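//maps a 19-moment vector back to the 19 distributions (hard-coded inverse of the moment transform,
//same as InvertMoments_host above)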
inline __device__ void InvertMoments(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
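//rebuilds the 19 distributions from the 9 physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz);
//the non-equilibrium part of each second-order moment is rescaled by SF, which is how populations
//are transferred between the coarse and refined grids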
inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF)
{
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
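//rescales the non-equilibrium part of every non-conserved moment by SF (density and momentum are untouched)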
inline __device__ void ScaleMoments_bgk(float* m, float SF)
{
float rho,u,v,w;
rho = m[0]; u = m[3]; v = m[5]; w = m[7];
m[ 1] = m[ 1]*SF+(1.f-SF)*(-11.f*rho+19.f*(u*u+v*v+w*w));
m[ 2] = m[ 2]*SF+(1.f-SF)*(3.f*rho-5.5f*(u*u+v*v+w*w) );
m[ 4] = m[ 4]*SF+(1.f-SF)*(-0.666666667f*u );
m[ 6] = m[ 6]*SF+(1.f-SF)*(-0.666666667f*v );
m[ 8] = m[ 8]*SF+(1.f-SF)*(-0.666666667f*w );
m[ 9] = m[ 9]*SF+(1.f-SF)*(2.f*u*u-(v*v+w*w) );
m[10] = m[10]*SF+(1.f-SF)*(-0.5f*(2.f*u*u-(v*v+w*w))*0.333333333333f);
m[11] = m[11]*SF+(1.f-SF)*(v*v-w*w );
m[12] = m[12]*SF+(1.f-SF)*(-0.5f*(v*v-w*w) );
m[13] = m[13]*SF+(1.f-SF)*(u*v );
m[14] = m[14]*SF+(1.f-SF)*(v*w );
m[15] = m[15]*SF+(1.f-SF)*(u*w );
m[16] = m[16]*SF;
m[17] = m[17]*SF;
m[18] = m[18]*SF;
}
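//fine-to-coarse variant of InvertPhysicalMoments: the incoming SF is recomputed from the local
//strain-rate magnitude so the Smagorinsky eddy viscosity is accounted for on both grid levels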
inline __device__ void InvertPhysicalMoments_LES_fc(float* f, float* mom, float SF, float omega_f)
{
float tau_f = 1.f/omega_f;
float S[6]={0};
StrainRate(S,mom,1.f);
float Smag_f = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5]));
float tau_c = tau_f+0.5f+12.f*Smag_f*CS;
tau_c *= 0.5f;
float omega_c = 1.f/tau_c;
tau_f = tau_f+Smag_f*CS;
omega_f = 1.f/tau_f;
SF = (1.f-omega_c)*omega_f/(LRFACTOR*omega_c*(1.f-omega_f));
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
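//coarse-to-fine counterpart of the function above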
inline __device__ void InvertPhysicalMoments_LES_cf(float* f, float* mom, float SF, float omega_c)
{
float tau_c = 1.f/omega_c;
float S[6]={0};
StrainRate(S,mom,1.f);
float Smag_c = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5]));
float tau_f = 2.f*tau_c-0.5f+1.5f*Smag_c*CS;
float omega_f = 1.f/tau_f;
tau_c = tau_c+Smag_c*CS;
omega_c = 1.f/tau_c;
SF = (LRFACTOR*omega_c*(1.f-omega_f))/((1.f-omega_c)*omega_f);
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
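//collision step: a single-relaxation-time (BGK) relaxation toward the D3Q19 equilibrium with rate omega;
//when SmagLES == 1 the relaxation time is increased locally from the magnitude of the non-equilibrium
//momentum flux (Smagorinsky subgrid model with constant CS), then the dpdy body force is added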
inline __device__ void mrt_collide(float* f, float omega, float dpdy)
{
float feq[19];
float u,v,w,rho;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
rho=f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+
f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
float usqr = u*u+v*v+w*w;
feq[0 ]=(0.3333333333f*(rho-1.5f*usqr));
feq[1 ]=(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
feq[2 ]=(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
feq[3 ]=(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
feq[4 ]=(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
feq[5 ]=(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
feq[6 ]=(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
feq[7 ]=(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
feq[8 ]=(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
feq[9 ]=(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
feq[10]=(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
feq[11]=(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
feq[12]=(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
feq[13]=(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
feq[14]=(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
feq[15]=(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
feq[16]=(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
feq[17]=(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
feq[18]=(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
if(SmagLES == 1)
{
float PI11 = (f[1 ]-feq[1 ])+(f[3 ]-feq[3 ])+(f[5 ]-feq[5 ])+
(f[6 ]-feq[6 ])+(f[7 ]-feq[7 ])+(f[8 ]-feq[8 ])+
(f[10]-feq[10])+(f[12]-feq[12])+(f[15]-feq[15])+
(f[17]-feq[17]);
float PI22 = (f[2 ]-feq[2 ])+(f[4 ]-feq[4 ])+(f[5 ]-feq[5 ])+
(f[6 ]-feq[6 ])+(f[7 ]-feq[7 ])+(f[8 ]-feq[8 ])+
(f[11]-feq[11])+(f[13]-feq[13])+(f[16]-feq[16])+
(f[18]-feq[18]);
float PI33 = (f[9 ]-feq[9 ])+(f[14]-feq[14])+(f[10]-feq[10])+
(f[12]-feq[12])+(f[15]-feq[15])+(f[17]-feq[17])+
(f[11]-feq[11])+(f[13]-feq[13])+(f[16]-feq[16])+
(f[18]-feq[18]);
float PI12 = (f[5 ]-feq[5 ])+(f[7 ]-feq[7 ])-(f[6 ]-feq[6 ])-(f[8 ]-feq[8 ]);
float PI13 = (f[10]-feq[10])+(f[17]-feq[17])-(f[12]-feq[12])-(f[15]-feq[15]);
float PI23 = (f[11]-feq[11])+(f[18]-feq[18])-(f[13]-feq[13])-(f[16]-feq[16]);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
float tau0 = 1.f/omega;
float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q);
omega = 1.f/tau;
}
f[0 ] -=omega*(f[0 ]-feq[0 ]);
f[1 ] -=omega*(f[1 ]-feq[1 ]);
f[2 ] -=omega*(f[2 ]-feq[2 ]);
f[3 ] -=omega*(f[3 ]-feq[3 ]);
f[4 ] -=omega*(f[4 ]-feq[4 ]);
f[5 ] -=omega*(f[5 ]-feq[5 ]);
f[6 ] -=omega*(f[6 ]-feq[6 ]);
f[7 ] -=omega*(f[7 ]-feq[7 ]);
f[8 ] -=omega*(f[8 ]-feq[8 ]);
f[9 ] -=omega*(f[9 ]-feq[9 ]);
f[10] -=omega*(f[10]-feq[10]);
f[11] -=omega*(f[11]-feq[11]);
f[12] -=omega*(f[12]-feq[12]);
f[13] -=omega*(f[13]-feq[13]);
f[14] -=omega*(f[14]-feq[14]);
f[15] -=omega*(f[15]-feq[15]);
f[16] -=omega*(f[16]-feq[16]);
f[17] -=omega*(f[17]-feq[17]);
f[18] -=omega*(f[18]-feq[18]);
AddForce(f,dpdy);
}
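//boundary treatments that rebuild f from modified moments: North_Extrap and East_Extrap impose a
//prescribed density while keeping the local velocity and higher-order moments; South_Extrap and
//West_Extrap impose a prescribed velocity component (other components zero) while keeping the local density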
inline __device__ void North_Extrap(float* f, float rho)
{
float m[19];
//rho = 1.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void South_Extrap(float* f, float v)
{
float m[19];
float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void East_Extrap(float* f, float rho)
{
float m[19];
//rho = 0.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void West_Extrap(float* f, float u, int t)
{
float m[19];
float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
//if(t == 1000 || t == 2000 || t == 3000) w = 0.01f;
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
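//free-slip (symmetry) conditions on the x-min ("bot") and x-max ("top") faces: populations are mirrored
//in x, with explicit handling of the y/z edges and corners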
__device__ void xsymmetry_bot(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13]=f[18];
f[11]=f[18];
f[16]=f[18];
f[ 6] =f[ 7];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == 0 && z == ZDIM-1){
f[ 4] = f[ 2];
f[11]=f[13];
f[18]=f[13];
f[16]=f[13];
f[ 6] =f[ 7];
f[14]=f[ 9];
f[17]=f[12];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[11]=f[16];
f[18]=f[16];
f[13]=f[16];
f[ 7] =f[ 6];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[16]=f[11];
f[18]=f[11];
f[13]=f[11];
f[ 7] =f[ 6];
f[14]=f[ 9];
f[17]=f[12];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11]=f[13];
f[16]=f[18];
f[ 8] = f[ 5];
}
else if(y == YDIM-1){
f[ 4]=f[ 2] ;
f[13]=f[11];
f[18]=f[16];
f[ 5]=f[ 8] ;
}
}
f[ 1] = f[ 3] ;
f[ 5] = f[ 6] ;
f[ 8] = f[ 7] ;
f[10]= f[12];
f[15]= f[17];
}
__device__ void xsymmetry_top(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13] = f[18];
f[11] = f[18];
f[16] = f[18];
f[ 5] = f[ 8];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == 0 && z == ZDIM-1){
f[ 2] = f[ 4];
f[11] = f[13];
f[18] = f[13];
f[16] = f[13];
f[ 5] = f[ 8];
f[14] = f[ 9];
f[15] = f[10];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[18] = f[16];
f[11] = f[16];
f[13] = f[16];
f[ 8] = f[ 5];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[16] = f[11];
f[18] = f[11];
f[ 8] = f[ 5];
f[14] = f[ 9];
f[15] = f[10];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11] = f[13];
f[16] = f[18];
f[ 5] = f[ 8];
}
else if(y == YDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[18] = f[16];
f[ 8] = f[ 5];
}
}
f[ 3] = f[ 1] ;
f[ 6] = f[ 5] ;
f[ 7] = f[ 8] ;
f[12]= f[10];
f[17]= f[15];
}
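//vel_av: running time average of the local velocity, accumulated from step START_VELAV onward.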
inline __device__ void vel_av(float* f, float& uAv, float& vAv, float& wAv, int t)
{
float u,v,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1);
vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1);
wAv = (wAv*(t-START_VELAV)+w)/((t-START_VELAV)+1);
}
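//vel_avLR: running average on the refined grid; contributions are weighted by LRFACTOR, presumably because the
//fine grid advances in fractional (sub-step) time increments of the coarse step.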
inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
}
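//vel_fluc: running average of the squared fluctuations about the stored mean velocity, from step START_VELFLUC onward.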
inline __device__ void vel_fluc(float* f, float& uAv,
float& vAv, float& wAv, float& ufluc, float& vfluc, float& wfluc, int t)
{
float u,v,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
w = (w-wAv)*(w-wAv);
ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1);
vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1);
wfluc = (wfluc*(t-START_VELFLUC)+w)/((t-START_VELFLUC)+1);
}
inline __device__ void vel_flucLR(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
}
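//initialize: set every node of this GPU's coarse sub-domain to the equilibrium state at rho = 1 and v = UMAX
//(zero velocity on solid/boundary nodes flagged by ImageFcn), by building the moment vector and inverting it.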
__global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcn(xcoord,ycoord,zcoord,0);
float u,v,w,rho;
rho = 1.f;
u = 0.0f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
bgk_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YDIM*zInner]=f[ i];
}
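//initializeLR: same initialization for the refined (LR) sub-domain, using the physical coordinates
//LRX0/LRY0/LRZ0 with grid spacing LRFACTOR.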
__global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
	float xcoord = LRX0+x*LRFACTOR;
	float ycoord = LRY0+y*LRFACTOR;
	float zcoord = LRZ0+LRFACTOR*(GPU_N*(zInner+2)+z);
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = 0.0f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
bgk_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YLRDIM*zInner]=f[ i];
}
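//update_top: one LBM step for the top halo plane of this GPU's coarse sub-domain. Distributions stream in from the
//previous halo (hA), the top interior slice (fA) and the temp buffer exchanged with the neighbouring GPU. Solid nodes
//are bounced back (accumulating drag through a shared-memory reduction and atomicAdd when flag_F is set), inlet/outlet
//and symmetry/periodic nodes are handled explicitly, and fluid nodes are relaxed with the MRT collision operator.
//When REFINEMENT is on, the moments are also cached in h_interp for the coarse-to-fine interpolation.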
__global__ void update_top(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* h_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1,t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_mem(14,x ,y ,pitch)];
f[15]= temp[buff_mem(15,x-1,y ,pitch)];
f[16]= temp[buff_mem(16,x ,y-1,pitch)];
f[17]= temp[buff_mem(17,x+1,y ,pitch)];
f[18]= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f[0 ];
hB[buff_mem(1 ,x,y,pitch)] = f[3 ];
hB[buff_mem(2 ,x,y,pitch)] = f[4 ];
hB[buff_mem(3 ,x,y,pitch)] = f[1 ];
hB[buff_mem(4 ,x,y,pitch)] = f[2 ];
hB[buff_mem(5 ,x,y,pitch)] = f[7 ];
hB[buff_mem(6 ,x,y,pitch)] = f[8 ];
hB[buff_mem(7 ,x,y,pitch)] = f[5 ];
hB[buff_mem(8 ,x,y,pitch)] = f[6 ];
hB[buff_mem(9 ,x,y,pitch)] = f[14];
hB[buff_mem(10,x,y,pitch)] = f[17];
hB[buff_mem(11,x,y,pitch)] = f[18];
hB[buff_mem(12,x,y,pitch)] = f[15];
hB[buff_mem(13,x,y,pitch)] = f[16];
hB[buff_mem(14,x,y,pitch)] = f[9 ];
hB[buff_mem(15,x,y,pitch)] = f[12];
hB[buff_mem(16,x,y,pitch)] = f[13];
hB[buff_mem(17,x,y,pitch)] = f[10];
hB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1);
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1);
West_Extrap(f,u_in,t);
}
if(im == 25)
xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1);
if(im == 26)
xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1);
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,XDIM-2,y,pitch)];
}
if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,1,y,pitch)];
}
if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,DYNY1-1,pitch)];
}
if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,DYNY2,pitch)];
}
if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,1,pitch)];
}
for(int i = 0; i<19; i++)
hB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
// //float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[19];
Moments(f,mom);
for(int i = 0; i<19; i++)
h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
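//update_bot: mirror of update_top for the bottom halo plane; upward-pointing distributions come from the temp buffer
//of the lower neighbour and downward-pointing ones from the first interior slice of fA.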
__global__ void update_bot(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2),t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)];
f[10]= temp[buff_mem(10,x-1,y ,pitch)];
f[11]= temp[buff_mem(11,x ,y-1,pitch)];
f[12]= temp[buff_mem(12,x+1,y ,pitch)];
f[13]= temp[buff_mem(13,x ,y+1,pitch)];
f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)];
f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)];
f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)];
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)];
f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f[0 ];
gB[buff_mem(1 ,x,y,pitch)] = f[3 ];
gB[buff_mem(2 ,x,y,pitch)] = f[4 ];
gB[buff_mem(3 ,x,y,pitch)] = f[1 ];
gB[buff_mem(4 ,x,y,pitch)] = f[2 ];
gB[buff_mem(5 ,x,y,pitch)] = f[7 ];
gB[buff_mem(6 ,x,y,pitch)] = f[8 ];
gB[buff_mem(7 ,x,y,pitch)] = f[5 ];
gB[buff_mem(8 ,x,y,pitch)] = f[6 ];
gB[buff_mem(9 ,x,y,pitch)] = f[14];
gB[buff_mem(10,x,y,pitch)] = f[17];
gB[buff_mem(11,x,y,pitch)] = f[18];
gB[buff_mem(12,x,y,pitch)] = f[15];
gB[buff_mem(13,x,y,pitch)] = f[16];
gB[buff_mem(14,x,y,pitch)] = f[9 ];
gB[buff_mem(15,x,y,pitch)] = f[12];
gB[buff_mem(16,x,y,pitch)] = f[13];
gB[buff_mem(17,x,y,pitch)] = f[10];
gB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2));
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,GPU*(zInner+2));
West_Extrap(f,u_in,t);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2));
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2));
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,XDIM-2,y,pitch)];
}
if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,1,y,pitch)];
}
if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,DYNY1-1,pitch)];
}
if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,DYNY2,pitch)];
}
if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,1,pitch)];
}
for(int i = 0; i<19; i++)
gB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[19];
Moments(f,mom);
for(int i = 0; i<19; i++)
g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
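//update_inn: LBM step for the interior slices of the coarse sub-domain. The first and last interior slices pull the
//missing distributions from the g (bottom) and h (top) halo buffers. Fluid nodes are collided with MRT, velocity
//averages/fluctuations are accumulated when VELAV is enabled, and moments are stored in f_interp for refinement.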
__global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velAv_w, float* velFluc_u, float* velFluc_v, float* velFluc_w, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z,t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_mem(14,x ,y ,pitch)];
f[15]= h [buff_mem(15,x-1,y ,pitch)];
f[16]= h [buff_mem(16,x ,y-1,pitch)];
f[17]= h [buff_mem(17,x+1,y ,pitch)];
f[18]= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_mem(9 ,x ,y ,pitch)];
f[10]= g [buff_mem(10,x-1,y ,pitch)];
f[11]= g [buff_mem(11,x ,y-1,pitch)];
f[12]= g [buff_mem(12,x+1,y ,pitch)];
f[13]= g [buff_mem(13,x ,y+1,pitch)];
f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_mem(10,x,y,z,pitch,zInner)] = f[17];
fB[f_mem(11,x,y,z,pitch,zInner)] = f[18];
fB[f_mem(12,x,y,z,pitch,zInner)] = f[15];
fB[f_mem(13,x,y,z,pitch,zInner)] = f[16];
fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f[12];
fB[f_mem(16,x,y,z,pitch,zInner)] = f[13];
fB[f_mem(17,x,y,z,pitch,zInner)] = f[10];
fB[f_mem(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z);
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)];
float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z);
West_Extrap(f,u_in,t);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2)+1+z);
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2)+1+z);
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,XDIM-2,y,z,pitch,zInner)];
}
if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,1,y,z,pitch,zInner)];
}
if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,DYNY1-1,z,pitch,zInner)];
}
if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,DYNY2,z,pitch,zInner)];
}
if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,1,z,pitch,zInner)];
}
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
float w_Av = velAv_w[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f,u_Av,v_Av,w_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
velAv_w[x+y*pitch+(z+1)*pitch*YDIM] = w_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
float w_Av = velAv_w[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM];
float w_fluc = velFluc_w[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f,u_Av,v_Av,w_Av,u_fluc,v_fluc,w_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
velFluc_w[x+y*pitch+(z+1)*pitch*YDIM] = w_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[19];
Moments(f,mom);
for(int i = 0; i<19; i++)
f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
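//update_top_LR: top-halo update on the refined grid. Only bounce-back and MRT collision are applied here; values
//along the refinement edge are supplied by the coarse/fine coupling kernel (InterpCF), and the pressure-gradient
//forcing is scaled by LRFACTOR.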
__global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_memLR(14,x ,y ,pitch)];
f[15]= temp[buff_memLR(15,x-1,y ,pitch)];
f[16]= temp[buff_memLR(16,x ,y-1,pitch)];
f[17]= temp[buff_memLR(17,x+1,y ,pitch)];
f[18]= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[14];
hB[buff_memLR(10,x,y,pitch)] = f[17];
hB[buff_memLR(11,x,y,pitch)] = f[18];
hB[buff_memLR(12,x,y,pitch)] = f[15];
hB[buff_memLR(13,x,y,pitch)] = f[16];
hB[buff_memLR(14,x,y,pitch)] = f[9 ];
hB[buff_memLR(15,x,y,pitch)] = f[12];
hB[buff_memLR(16,x,y,pitch)] = f[13];
hB[buff_memLR(17,x,y,pitch)] = f[10];
hB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,dpdy*LRFACTOR);
for(int i = 0; i<19; i++)
hB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
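//update_bot_LR: bottom-halo counterpart of update_top_LR.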
__global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
//float zcoord = LRZ0+GPU*LRFACTOR*z;
float zcoord = LRZ0+LRFACTOR*(GPU*(zInner+2)-1);
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)];
f[10]= temp[buff_memLR(10,x-1,y ,pitch)];
f[11]= temp[buff_memLR(11,x ,y-1,pitch)];
f[12]= temp[buff_memLR(12,x+1,y ,pitch)];
f[13]= temp[buff_memLR(13,x ,y+1,pitch)];
f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)];
f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)];
f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)];
f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)];
f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[14];
gB[buff_memLR(10,x,y,pitch)] = f[17];
gB[buff_memLR(11,x,y,pitch)] = f[18];
gB[buff_memLR(12,x,y,pitch)] = f[15];
gB[buff_memLR(13,x,y,pitch)] = f[16];
gB[buff_memLR(14,x,y,pitch)] = f[9 ];
gB[buff_memLR(15,x,y,pitch)] = f[12];
gB[buff_memLR(16,x,y,pitch)] = f[13];
gB[buff_memLR(17,x,y,pitch)] = f[10];
gB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,dpdy*LRFACTOR);
for(int i = 0; i<19; i++)
gB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
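//update_inn_LR: interior update on the refined grid, with MRT collision (forcing scaled by LRFACTOR) and
//two-component (u,v) velocity statistics when VELAV is enabled.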
__global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_memLR(14,x ,y ,pitch)];
f[15]= h [buff_memLR(15,x-1,y ,pitch)];
f[16]= h [buff_memLR(16,x ,y-1,pitch)];
f[17]= h [buff_memLR(17,x+1,y ,pitch)];
f[18]= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)];
f[10]= g [buff_memLR(10,x-1,y ,pitch)];
f[11]= g [buff_memLR(11,x ,y-1,pitch)];
f[12]= g [buff_memLR(12,x+1,y ,pitch)];
f[13]= g [buff_memLR(13,x ,y+1,pitch)];
f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,dpdy*LRFACTOR);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
/*
InterpCF runs on the LR grid. A subset of its threads first reads the coarse-mesh nodes that completely envelop the fine-mesh block and loads their moments into shared memory; all threads then interpolate from that shared data and rescale the resulting f's for the fine grid.
*/
__global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int GPU, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
__shared__ float mom_c[BLOCKSIZEINTERP][2][2][19];
__shared__ float S_c[BLOCKSIZEINTERP][2][2][6];
//int GPU = 0;
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner_f+2)+z));
if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and g_temp
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use h and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner];
}
else{
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo
int ymax = YLRDIM*LRFACTOR+1;
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner];
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
syncthreads();
if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){
//if(x<LRLEVEL+3 || x>XLRDIM-LRLEVEL-5 || y<LRLEVEL+3 || y>YLRDIM-LRLEVEL-5){
//interpolate from shared mem
int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f);
int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f);
int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f);
int xp = xm+1; //int yp = ym+1; int zp = zm+1;
float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm;
float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym;
float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm;
float mom[19];
for(int i = 0; i<19; i++){
float v000 = mom_c[xm][0][0][i];
float v001 = mom_c[xp][0][0][i];
float v010 = mom_c[xm][1][0][i];
float v011 = mom_c[xp][1][0][i];
float v100 = mom_c[xm][0][1][i];
float v101 = mom_c[xp][0][1][i];
float v110 = mom_c[xm][1][1][i];
float v111 = mom_c[xp][1][1][i];
mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5];
u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5];
u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5];
u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5];
u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5];
u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5];
u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5];
u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom_c[xm][0][0][3];m05=mom_c[xm][0][0][5];m07=mom_c[xm][0][0][7];
m13=mom_c[xp][0][0][3];m15=mom_c[xp][0][0][5];m17=mom_c[xp][0][0][7];
m23=mom_c[xm][1][0][3];m25=mom_c[xm][1][0][5];m27=mom_c[xm][1][0][7];
m33=mom_c[xp][1][0][3];m35=mom_c[xp][1][0][5];m37=mom_c[xp][1][0][7];
m43=mom_c[xm][0][1][3];m45=mom_c[xm][0][1][5];m47=mom_c[xm][0][1][7];
m53=mom_c[xp][0][1][3];m55=mom_c[xp][0][1][5];m57=mom_c[xp][0][1][7];
m63=mom_c[xm][1][1][3];m65=mom_c[xm][1][1][5];m67=mom_c[xm][1][1][7];
m73=mom_c[xp][1][1][3];m75=mom_c[xp][1][1][5];m77=mom_c[xp][1][1][7];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
float xpr = 4.f*xf*xf-4.f*xf+1.f;
float ypr = 4.f*yf*yf-4.f*yf+1.f;
float zpr = 4.f*zf*zf-4.f*zf+1.f;
mom[3] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[5] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[7] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
float f[19];
//InvertPhysicalMoments(f,mom,SF);
//InvertPhysicalMoments_LES_cf(f,mom,SF,omega_c);
ScaleMoments_bgk(mom,SF);
// mom[0] = 2.f;
// mom[3] = 0.1f;
// mom[5] = 0.1f;
// mom[7] = 0.1f;
InvertMoments(f,mom);
if(im != 1 && im != 10){
if(z==0){
for(int i = 0; i<19; i++){
g_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else if(z==gridDim.z*blockDim.z-1){
for(int i = 0; i<19; i++){
h_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else{
for(int i = 0; i<19; i++){
f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i];
}
}
}
}
}
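//InterpFC restricts the fine-grid solution back to the coarse grid inside the refined region: for each coarse node it
//gathers the 8 surrounding fine nodes, converts them to moments and strain rates, averages them (ORDER == 1) or adds a
//quadratic correction built from the strain-rate tensor (ORDER == 2), rescales with SF, and writes the inverted
//distributions into f_c/g_c/h_c.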
__global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int GPU, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
//if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
//(true))
//if( (x > LRX0+5 && x < LRX0+XLRDIM*LRFACTOR-6 && y > LRY0+5 && y < LRY0+YLRDIM*LRFACTOR-6) &&
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
(true))
{
float f[19];
float mom[8][19];//physical moments of 8 neighboring nodes
float S_f[8][6];//strain rate tensor of 8 neighboring nodes
int xm = LRLEVEL*(x-LRX0);
int ym = LRLEVEL*(y-LRY0);
int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
//top nodes. interp between h and h_temp. output to h
if(z == zInner+1)
{
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)];
Moments(f,mom[0]);
StrainRate(S_f[0],mom[0],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)];
Moments(f,mom[1]);
StrainRate(S_f[1],mom[1],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)];
Moments(f,mom[2]);
StrainRate(S_f[2],mom[2],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)];
Moments(f,mom[3]);
StrainRate(S_f[3],mom[3],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)];
Moments(f,mom[4]);
StrainRate(S_f[4],mom[4],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)];
Moments(f,mom[5]);
StrainRate(S_f[5],mom[5],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)];
Moments(f,mom[6]);
StrainRate(S_f[6],mom[6],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)];
Moments(f,mom[7]);
StrainRate(S_f[7],mom[7],1.f);
}
//inner nodes. output to g or f
else{
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)];
Moments(f,mom[0]);
StrainRate(S_f[0],mom[0],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)];
Moments(f,mom[1]);
StrainRate(S_f[1],mom[1],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)];
Moments(f,mom[2]);
StrainRate(S_f[2],mom[2],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)];
Moments(f,mom[3]);
StrainRate(S_f[3],mom[3],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)];
Moments(f,mom[4]);
StrainRate(S_f[4],mom[4],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)];
Moments(f,mom[5]);
StrainRate(S_f[5],mom[5],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)];
Moments(f,mom[6]);
StrainRate(S_f[6],mom[6],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)];
Moments(f,mom[7]);
StrainRate(S_f[7],mom[7],1.f);
}
if(ORDER == 1){
for(int i = 0; i<19; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
}
else if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5];
u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5];
u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5];
u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5];
u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5];
u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5];
u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5];
u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom[0][3];m05=mom[0][5];m07=mom[0][7];
m13=mom[1][3];m15=mom[1][5];m17=mom[1][7];
m23=mom[2][3];m25=mom[2][5];m27=mom[2][7];
m33=mom[3][3];m35=mom[3][5];m37=mom[3][7];
m43=mom[4][3];m45=mom[4][5];m47=mom[4][7];
m53=mom[5][3];m55=mom[5][5];m57=mom[5][7];
m63=mom[6][3];m65=mom[6][5];m67=mom[6][7];
m73=mom[7][3];m75=mom[7][5];m77=mom[7][7];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
for(int i = 0; i<19; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f;
float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f;
float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f;
mom[0][3] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[0][5] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[0][7] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
//InvertPhysicalMoments(f,mom[0],SF);
//InvertPhysicalMoments_LES_fc(f,mom[0],SF,omega_f);
ScaleMoments_bgk(mom[0],SF);
InvertMoments(f,mom[0]);
//for(int i = 0; i<19; i++) f[i] = 0.1f;
//int GPU = 0;
int im = ImageFcn(x,y,GPU*(zInner+2)+z,0);
if(im != 1 && im != 10){
if(z == 0){
for(int i = 0; i<19; i++)
g_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
h_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else{
for(int i = 0; i<19; i++)
f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i];
}
}
}//end extraction region
}
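//AverageV: block-reduces the y-velocity over the plane y = DYNY1 and atomically accumulates the sum into Av_V[t];
//the plane average is recovered later in WriteAvV.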
__global__ void AverageV(float* fA, float* gA, float* hA, size_t pitch, int GPU, int zInner, float* Av_V, int t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float f[19];
float v_av = 0;
__shared__ float sumV[BLOCKSIZEX];
syncthreads();
if(z == 0){
for(int i = 0; i<19; i++)
f[i] = gA[buff_mem(i,x,DYNY1,pitch)];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
f[i] = hA[buff_mem(i,x,DYNY1,pitch)];
}
else{
for(int i = 0; i<19; i++)
f[i] = fA[f_mem(i,x,DYNY1,z-1,pitch,zInner)];
}
sumV[threadIdx.x] = f[2]-f[4]+f[5]+f[6]-f[7]-f[8]+f[11]-f[13]+f[16]-f[18];
syncthreads();
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumV[threadIdx.x] += sumV[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&Av_V[t],sumV[0]);
}
}
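//WriteResults: dumps this GPU's coarse sub-domain (bottom halo gin, interior fin, top halo hin) in Tecplot POINT
//format, together with the averaged/fluctuating velocities and a strain-rate magnitude; GPU 0 also writes one
//z-slice to outputslice.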
void WriteResults(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
if(GPU == 0){
outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n";
outputslice<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<1<<"\n";
}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<","<<velAv[2][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<","<<velFluc[2][i+j*XDIM]<<","<<0<<endl;
}}
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18];
//float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18];
float m10 =-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17];
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
m[9] -= 2.f*u*u-(v*v+w*w);
m[11]-= v*v-w*w;
m[13]-= u*v;
m[14]-= v*w;
m[15]-= u*w;
float PI11 = -0.5f *(m[ 9]);
float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f;
float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f;
float PI12 = -1.5f*m[13];
float PI23 = -1.5f*m[14];
float PI13 = -1.5f*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//InvertMoments_host(f,m);
//u = m[3];
//v = m[5];
//w = m[7];
//m6 = m[6 ];
//m10= m[10];
//m16= m[16];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "<<velAv[2][i+j*XDIM+k*XDIM*YDIM]<<", "
//<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
if(k == 1 && GPU == 0){
outputslice<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "<<velAv[2][i+j*XDIM+k*XDIM*YDIM]<<","
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
}
}}}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[2][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<0<<endl;
}}
}
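//WriteResultsLR: same output for the refined sub-domain, with coordinates mapped through LRX0/LRY0/LRZ0 and LRFACTOR
//and the strain magnitude rescaled by 1/LRFACTOR.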
void WriteResultsLR(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n";
if(GPU == 0){
outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n";
outputslice<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<1<<"\n";
}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<","<<velAv[2][i+j*XLRDIM]
<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<","<<velFluc[2][i+j*XLRDIM]
<<","<<0<<endl;
}}
for(int k = 1; k<ZLRDIM/GPU_N-1; k++){
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k);
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
m[9] -= 2.f*u*u-(v*v+w*w);
m[11]-= v*v-w*w;
m[13]-= u*v;
m[14]-= v*w;
m[15]-= u*w;
float PI11 = -0.5f *(m[ 9]);
float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f;
float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f;
float PI12 = -1.5f*m[13];
float PI23 = -1.5f*m[14];
float PI13 = -1.5f*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13))/LRFACTOR;
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "<<velAv [2][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[2][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
if(k == 3 && GPU == 0){
outputslice<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "<<velAv [2][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[2][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
}
}}}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "<<velAv[2][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[2][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<0<<endl;
}}
}
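//Write the force time history to the .force file, normalized by the reference scale UMAX^2*ZDIM*OBSTR1 (scaled by LRLEVEL^2 when the forces come from the refined level).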
void WriteForces(float **F, ofstream &output, int ForceTime, int level)
{
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl;
}
}
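//Write the time history of the plane-averaged streamwise velocity (accumulated sums divided by the (XDIM-2)*ZDIM sample nodes per step).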
void WriteAvV(float *v, ofstream &output)
{
for(int i = 0; i<TMAX; i++){
output<<i<<", "<<v[i]/(XDIM-2)/ZDIM<<endl;
}
}
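//Echo the compile-time simulation parameters (domain sizes, block sizes, obstacle geometry, relaxation rates, ...) to the given stream.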
void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"DPDY \t: "<<DPDY<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinsky LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
}
int main(int argc, char *argv[])
{
int GPU_N; hipGetDeviceCount(&GPU_N);
GPU_N=NUMGPU;
cout<<"number of GPUs: "<<GPU_N<<endl;
ofstream output; ofstream outputForce; ofstream outputInputs; ofstream outputAvV;
string FileName = CASENAME;
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
outputAvV.open ((FileName+".vel").c_str());
ofstream outputpart[REFINEMENT*GPU_N+GPU_N], outputslice;
for(int i = 0; i< REFINEMENT*GPU_N+GPU_N; i++){
//string filenum = to_string(i);
char str[10];
snprintf(str,10,"%i",i);
outputpart[i].open ((FileName+"_part"+str+".dat").c_str());
}
outputslice.open ((FileName+"_slice.dat").c_str());
//size_t memsize, memsize2;
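//Round the row pitch up to the next power of two (in elements) before converting it to bytes; pitch_e is the pitch in elements.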
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);//pitch*sizeof(float);
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
float CharLength = OBSTR1*2.f;
float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
if(LRFACTOR == 0.125f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
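//SF_cf and SF_fc rescale the non-equilibrium moments when data is transferred coarse-to-fine and fine-to-coarse between the two grids.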
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
cout<<SF_cf<<endl;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
WriteInputs(cout,omega,omegaLR,GPU_N);
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 AvV_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),1,(ZDIM/GPU_N)/BLOCKSIZEZ);
hipStream_t stream_halo[GPU_N];
hipStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *F_h[GPU_N][3];
float *F_d[GPU_N][3];
float *F_total[3];
float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3];
float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3];
float *Av_V_h[GPU_N];
float *Av_V_d[GPU_N];
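//dpdy is the streamwise pressure-gradient forcing; it is adjusted at run time by the proportional controller in the time loop to hold the bulk velocity near UMAX.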
float dpdy = DPDY;
for(int i = 0; i<3; i++)
F_total[i] = (float *)malloc(ForceTime*sizeof(float));
for(int i=0;i<3;i++)
for(int j=0;j<(ForceTime);j++)
F_total[i][j] = 0;
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
F_h [n][i] = (float *)malloc(ForceTime*sizeof(float));
velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
}
Av_V_h[n] = (float *)malloc(TMAX*sizeof(float));
hipSetDevice(n);
hipStreamCreate(&stream_halo[n]);
hipStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++)
if(m != n) hipDeviceEnablePeerAccess(m,0);
for(int i = 0; i<2; i++){
hipMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float));
hipMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float));
}
hipMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
hipMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float));
hipMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
hipMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
}
hipMalloc((void **) & Av_V_d[n],TMAX*sizeof(float));
//initialize host f_inner
for (int i = 0; i < XDIM*YDIM*zInner*19; i++)
f_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for(int j=0;j<(ForceTime);j++)
F_h[n][i][j] = 0;
for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){
velAv_h [n][i][j] = 0;
velFluc_h[n][i][j] = 0;
}
}
for(int j=0;j<TMAX;j++)
Av_V_h[n][j] = 0;
for(int i = 0; i<2; i++){
hipMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice);
hipMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
hipMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
}
hipMemcpy(Av_V_d[n],Av_V_h[n],sizeof(float)*(TMAX),hipMemcpyHostToDevice);
//initialization kernels
for(int i = 0; i<2; i++){
hipLaunchKernelGGL(( initialize), dim3(grid),dim3(threads), 0, 0, f_d[n][i],pitch_e,zInner,GPU_N);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_d[n][i],pitch_e, 1,GPU_N);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_d[n][i],pitch_e, 1,GPU_N);
}
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_temp[n],pitch_e, 1,GPU_N);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_temp[n],pitch_e, 1,GPU_N);
}//end Malloc and Initialize
//data pointers as 3D array (GPUxCoord)
float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3];
float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3];
float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N];
float *interp_h[GPU_N];
size_t pitchLR = 2;
while(pitchLR<XLRDIM)
pitchLR=pitchLR*2;
pitchLR = pitchLR*sizeof(float);
size_t pitchLR_e = pitchLR/sizeof(float);
cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl;
size_t pitchInterp = 2;
while(pitchInterp<XLRDIM*LRFACTOR+1)
pitchInterp=pitchInterp*2;
pitchInterp = pitchInterp*sizeof(float);
size_t pitchInterp_e = pitchInterp/sizeof(float);
cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL);
dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N);
cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl;
dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ);
//setup LR
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*19*sizeof(float));
for(int i = 0; i<3; i++){
velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
hipSetDevice(n);
for(int i = 0; i<2; i++){
hipMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float));
hipMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
}
hipMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*19*sizeof(float));
hipMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*19*sizeof(float));
hipMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*19*sizeof(float));
hipMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*19*sizeof(float));
hipMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*19*sizeof(float));
for(int i = 0; i<3; i++){
hipMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_LR_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){
velAv_LR_h [n][i][j] = 0;
velFluc_LR_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
hipMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice);
hipMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
hipMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
hipLaunchKernelGGL(( initializeLR), dim3(LR_grid),dim3(LR_threads), 0, 0, f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_d[n][i],pitchLR_e, 1,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_d[n][i],pitchLR_e, 1,GPU_N);
}
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_temp[n],pitchLR_e, 1,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_temp[n],pitchLR_e, 1,GPU_N);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
hipFuncSetCacheConfig(InterpCF,hipFuncCachePreferShared);
int A = 0; int B = 1; int C = 0; int D = 1;
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
size_t mem_avail, mem_total;
hipMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n";
cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n";
}
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t++)
{
//copy temporary halo arrays for top and bottom of the coarse mesh to the neighboring GPUs (all 19 distributions are copied here)
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&h_temp[n][0],n,&g_d[ (n+1)%GPU_N][A][0], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_temp[n][0],n,&h_d[abs(n-1)%GPU_N][A][0],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*19,stream_halo[n]);
//compute inner nodes on coarse mesh
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inn), dim3(grid),dim3(threads),0,stream_inner[n], f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velAv_d[n][2],velFluc_d[n][0],velFluc_d[n][1],velFluc_d[n][2],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e,dpdy);
}
//synchronize halo stream before computing top and bottom nodes
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
//compute top and bottom nodes
for(int n = 0; n<GPU_N; n++)
{
hipSetDevice(n);
hipLaunchKernelGGL(( update_top), dim3(g_grid), dim3(threads), 0, stream_halo [n], h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e,dpdy);
hipLaunchKernelGGL(( update_bot), dim3(g_grid), dim3(threads), 0, stream_halo [n], g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e,dpdy);
}
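//every 100 steps (after step 1000): measure the plane-averaged velocity and nudge dpdy with a proportional controller (gain KP) toward the target bulk velocity UMAX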
if(t%100 == 0 && t>1000)
{
for(int n = 0; n<GPU_N; n++)
hipDeviceSynchronize();
for(int n = 0; n<GPU_N; n++)
{
hipLaunchKernelGGL(( AverageV), dim3(AvV_grid), dim3(threads), 0, 0, f_d[n][B],g_d[n][B],h_d[n][B],pitch_e,n,zInner,Av_V_d[n],t);
}
for(int n = 0; n<GPU_N; n++)
hipMemcpy(&Av_V_h[n][t],&Av_V_d[n][t],sizeof(float),hipMemcpyDeviceToHost);
float Av_V = 0;
for(int n = 0; n<GPU_N; n++)
Av_V += Av_V_h[n][t];
Av_V /= (XDIM-2)*ZDIM;
float diff;
diff = (Av_V-UMAX)/UMAX;
dpdy += diff*KP*abs(DPDY);
//dpdy = max(DPDY*)
// if(Av_V < UMAX*0.995f)
// dpdy *= 1.01f;
// else if(Av_V > UMAX*1.005f)
// dpdy *= 0.99f;
if(t%1000 == 0) outputAvV<<t<<", "<<Av_V<<", "<<dpdy<<endl;
}
//hipDeviceSynchronize();
swap(A,B);
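//local-refinement sub-cycling: the fine mesh advances LRLEVEL substeps per coarse time step, then the two grids exchange data through the interpolation kernels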
if(REFINEMENT == 1){
int flag_F = 0;
for(int i = 0; i<LRLEVEL; i++){
if(t>STARTF && i == 0) flag_F = 1;
else flag_F = 0;
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inn_LR), dim3(LR_grid),dim3(LR_threads),0,stream_inner[n], f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F,dpdy);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_top_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F,dpdy);
hipLaunchKernelGGL(( update_bot_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F,dpdy);
}
if(i == LRLEVEL-1)
{
//for(int n = 0; n<GPU_N; n++)
//hipMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
flag_F = 0;
swap(C,D);
}
//interp from coarse grid
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( InterpCF), dim3(Interp_grid),dim3(Interp_threads),0,stream_inner[n], f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,n,zInner,zLRInner);
//hipDeviceSynchronize();
}
//interp from fine grid
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( InterpFC), dim3(Interp_grid_c),dim3(threads),0,stream_halo[n], f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,n,zInner,zLRInner);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
}//end time loop
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost);
hipMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
hipMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
}
hipMemcpy(Av_V_h[n],Av_V_d[n],sizeof(float)*TMAX,hipMemcpyDeviceToHost);
WriteResults(outputpart[n],outputslice,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n);
outputpart[n]<<endl;
for(int i=0;i<3;i++)
for(int j=0;j<ForceTime;j++)
F_total[i][j] += F_h[n][i][j];
if(n > 0){
for(int j=0;j<TMAX;j++)
Av_V_h[0][j] += Av_V_h[n][j];
}
for(int i = 0; i<2; i++){
hipFree(f_d[n][i]);
hipFree(g_d[n][i]);
hipFree(h_d[n][i]);
}
//f_d[n], g_d[n], h_d[n] and F_d[n] are host-side arrays of device pointers; the per-buffer device allocations are freed individually
hipFree(g_temp[n]);
hipFree(h_temp[n]);
for(int i=0;i<3;i++)
hipFree(F_d[n][i]);
}//end Memcpy and write results
WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
//WriteAvV(Av_V_h[0],outputAvV);
if(REFINEMENT == 1){
// output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
// output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost);
hipMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost);
//hipMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,hipMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
hipMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
}
WriteResultsLR(outputpart[GPU_N+n],outputslice,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n);
outputpart[GPU_N+n]<<endl;
for(int i = 0; i<2; i++){
hipFree(f_LR_d[n][i]);
hipFree(g_LR_d[n][i]);
hipFree(h_LR_d[n][i]);
}
//f_LR_d[n], g_LR_d[n], h_LR_d[n] are host-side arrays of device pointers; the device buffers were freed above
hipFree(g_LR_temp[n]);
hipFree(h_LR_temp[n]);
}
}
return 0;
}
|
aa6dfef7854e7e16374a84381aaf0517b5ec53c4.cu
|
#include <cuda.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "Re3000_4"
#define NUMGPU 1
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define BLOCKSIZEINTERP 8
#define XDIM 64
#define YDIM 124
#define ZDIM 13
#define TMAX 3000000
#define STARTF 3000000
#define DYNY1 1200000
#define DYNY2 1
#define KP 0.3f //p-control constant
#define OBSTR1 31.f
#define OBSTX1 32.5f
#define OBSTY1 31.5f
#define OBSTZ1 32.5f
#define OBSTR2 5.f
#define OBSTX2 31.5f
#define OBSTY2 35.5f
#define OBSTZ2 32.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 16.25f //minimum x coord of LR
#define XLRDIM 64 //number of nodes in x
#define LRY0 31.25f
#define YLRDIM 80
#define LRZ0 -0.75f
#define ZLRDIM 8
#define ORDER 2 //order of accuracy of interpolation
#define RE 15000.f//2000.f//100.f;
#define UMAX 0.06f
#define SmagLES 1 //1,0
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 0 //1,0
#define CS 0.02f
#define DPDX 0.f
#define DPDY -7.0e-8
#define VELAV 1
#define START_VELAV 200000
#define START_VELFLUC 1600000
inline __device__ int ImageFcnLR(float x, float y, float z)
{
int value = 0;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
{
value = 10;
}
return value;
}
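//Node-type flag for the coarse mesh: 0 = fluid, nonzero values mark solid/boundary nodes (the specific codes are interpreted by the update kernels).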
inline __device__ int ImageFcn(int x, int y, int z, int t)
{
int value = 0;
if(abs(x-OBSTX2) < OBSTR2 && abs(y-OBSTY2) < OBSTR2 && t < 5000)
value = 10;
if(abs(x-OBSTX2-3) < OBSTR2 && abs(y-OBSTY2-3) < OBSTR2 && t < 5000 && z == 10)
value = 10;
//if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
if(x == 0)
value = 1;//50;//400;
else if(x == XDIM-1)
value = 1;//51;//300;
// else if(y == 0)
// value = 200; //52;//1;//22;
//// else if(y == DYNY1)
//// value = 54;//1;//22;
// else if(y == YDIM-1)
// value = 100;
//if(z == ZDIM-1) value = 1;
return value;
}
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
}
inline __device__ float PoisProf3D (float x, float y){
x = x-0.5f;
y = y-0.5f;
//float H = 41.f;
return UMAX;//2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H));
// float radius = (YDIM-1-1)*0.5f;
// float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
// return (result);
}
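/* Wall-clock difference x-y in seconds, stored in *result. */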
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmax(int a,int b)
{
if (a>b) return a;
else return b;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
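//Index helpers: linear index of distribution f_num at a node; f_mem/buff_mem wrap y periodically, and all variants clamp the result into the allocated range.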
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
if(y > YDIM-1) y = 0;
if(y < 0) y = YDIM-1;
//if(y == DYNY1+1) y = 0; //YDIM-1;
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner)
{
int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*(YLRDIM*LRFACTOR+1)*(zInner));
return index;
}
inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner)
{
int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch);
index = dmax(index);
index = dmin(index,19*pitch*(YLRDIM*LRFACTOR+1));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
if(y > YDIM-1) y = 0;
if(y < 0) y = YDIM-1;
//if(y == DYNY1+1) y = 0; //YDIM-1;
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
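//Apply the streamwise (y) body force dpdy to the distributions with a y-component; the x-direction force terms are left commented out.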
inline __device__ void AddForce(float* f, float dpdy)
{
// f[1] -= 0.0555555556f*3.f*DPDX;
// f[3] += 0.0555555556f*3.f*DPDX;
// f[5] -= 0.0277777778f*3.f*DPDX;
// f[6] += 0.0277777778f*3.f*DPDX;
// f[7] += 0.0277777778f*3.f*DPDX;
// f[8] -= 0.0277777778f*3.f*DPDX;
// f[10]-= 0.0277777778f*3.f*DPDX;
// f[12]+= 0.0277777778f*3.f*DPDX;
// f[15]-= 0.0277777778f*3.f*DPDX;
// f[17]+= 0.0277777778f*3.f*DPDX;
f[2] -= 0.0555555556f*3.f*dpdy;
f[4] += 0.0555555556f*3.f*dpdy;
f[5] -= 0.0277777778f*3.f*dpdy;
f[6] -= 0.0277777778f*3.f*dpdy;
f[7] += 0.0277777778f*3.f*dpdy;
f[8] += 0.0277777778f*3.f*dpdy;
f[11]-= 0.0277777778f*3.f*dpdy;
f[13]+= 0.0277777778f*3.f*dpdy;
f[16]-= 0.0277777778f*3.f*dpdy;
f[18]+= 0.0277777778f*3.f*dpdy;
}
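//Transform the 19 distributions into D3Q19 MRT moment space.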
inline __device__ void Moments(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void Moments_host(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void InvertMoments_host(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
meq[ 2] = 7.53968254f*(u*u+v*v+w*w);
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[11] = v*v-w*w;
meq[13] = u*v;
meq[14] = v*w;
meq[15] = u*w;
}
inline __device__ void bgk_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
meq[ 2] = 3.f*rho-5.5f*(u*u+v*v+w*w);
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[10] = -0.5f*meq[9]*0.333333333333f;
meq[11] = v*v-w*w;
meq[12] = -0.5f*meq[11];
meq[13] = u*v;
meq[14] = v*w;
meq[15] = u*w;
}
//outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from 19 moments
inline __device__ void StrainRate(float* S, float* m_strain, float dx)
{
float rho = m_strain[0];
float u = m_strain[3];
float v = m_strain[5];
float w = m_strain[7];
float m1 = m_strain[1 ]+11.f*rho-19.f*(u*u+v*v+w*w);
float m9 = m_strain[9 ]-(2.f*u*u-(v*v+w*w));
float m11= m_strain[11]-(v*v-w*w);
float m13= m_strain[13]-(u*v);
float m14= m_strain[14]-(v*w);
float m15= m_strain[15]-(u*w);
S[0] = -0.026315789f*( m1+19.f* m9);
S[1] = -0.013157895f*(2.f*m1-19.f*(m9-3.f*m11));
S[2] = -0.013157895f*(2.f*m1-19.f*(m9+3.f*m11));
S[3] = -1.5f*m13;
S[4] = -1.5f*m14;
S[5] = -1.5f*m15;
}
//outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f
inline __device__ void PhysicalMoments(float* mom, float* f)
{
mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17];
mom[6] = f[5]+-f[6]+f[7]+-f[8];
mom[7] = f[11]+-f[13]+-f[16]+f[18];
mom[8] = f[10]+-f[12]+-f[15]+f[17];
}
inline __device__ void InvertMoments(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
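//Reconstruct distributions from the 9 physical moments, rescaling the non-equilibrium (stress) moments by SF for the grid transfer.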
inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF)
{
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
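//Rescale the non-conserved moments by SF toward their equilibria; rho, u, v, w are left unchanged.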
inline __device__ void ScaleMoments_bgk(float* m, float SF)
{
float rho,u,v,w;
rho = m[0]; u = m[3]; v = m[5]; w = m[7];
m[ 1] = m[ 1]*SF+(1.f-SF)*(-11.f*rho+19.f*(u*u+v*v+w*w));
m[ 2] = m[ 2]*SF+(1.f-SF)*(3.f*rho-5.5f*(u*u+v*v+w*w) );
m[ 4] = m[ 4]*SF+(1.f-SF)*(-0.666666667f*u );
m[ 6] = m[ 6]*SF+(1.f-SF)*(-0.666666667f*v );
m[ 8] = m[ 8]*SF+(1.f-SF)*(-0.666666667f*w );
m[ 9] = m[ 9]*SF+(1.f-SF)*(2.f*u*u-(v*v+w*w) );
m[10] = m[10]*SF+(1.f-SF)*(-0.5f*(2.f*u*u-(v*v+w*w))*0.333333333333f);
m[11] = m[11]*SF+(1.f-SF)*(v*v-w*w );
m[12] = m[12]*SF+(1.f-SF)*(-0.5f*(v*v-w*w) );
m[13] = m[13]*SF+(1.f-SF)*(u*v );
m[14] = m[14]*SF+(1.f-SF)*(v*w );
m[15] = m[15]*SF+(1.f-SF)*(u*w );
m[16] = m[16]*SF;
m[17] = m[17]*SF;
m[18] = m[18]*SF;
}
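//Fine-to-coarse transfer with the Smagorinsky model: the effective relaxation rates on both grids include the eddy-viscosity contribution before the moment scale factor SF is recomputed.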
inline __device__ void InvertPhysicalMoments_LES_fc(float* f, float* mom, float SF, float omega_f)
{
float tau_f = 1.f/omega_f;
float S[6]={0};
StrainRate(S,mom,1.f);
float Smag_f = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5]));
float tau_c = tau_f+0.5f+12.f*Smag_f*CS;
tau_c *= 0.5f;
float omega_c = 1.f/tau_c;
tau_f = tau_f+Smag_f*CS;
omega_f = 1.f/tau_f;
SF = (1.f-omega_c)*omega_f/(LRFACTOR*omega_c*(1.f-omega_f));
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
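//Coarse-to-fine transfer with the Smagorinsky model: analogous to the fine-to-coarse version, with the scale factor SF inverted for the opposite direction.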
inline __device__ void InvertPhysicalMoments_LES_cf(float* f, float* mom, float SF, float omega_c)
{
float tau_c = 1.f/omega_c;
float S[6]={0};
StrainRate(S,mom,1.f);
float Smag_c = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5]));
float tau_f = 2.f*tau_c-0.5f+1.5f*Smag_c*CS;
float omega_f = 1.f/tau_f;
tau_c = tau_c+Smag_c*CS;
omega_c = 1.f/tau_c;
SF = (LRFACTOR*omega_c*(1.f-omega_f))/((1.f-omega_c)*omega_f);
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
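/*
mrt_collide performs the D3Q19 MRT collision on one node's distribution functions f.
It computes the macroscopic velocity and density, builds the equilibria feq, and, when
SmagLES == 1, augments the relaxation rate omega with a Smagorinsky eddy viscosity
derived from the non-equilibrium stress tensor PI. The dpdy argument is handed to
AddForce and is presumably the constant body force / pressure gradient in y.
*/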
inline __device__ void mrt_collide(float* f, float omega, float dpdy)
{
float feq[19];
float u,v,w,rho;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
rho=f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+
f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
float usqr = u*u+v*v+w*w;
feq[0 ]=(0.3333333333f*(rho-1.5f*usqr));
feq[1 ]=(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
feq[2 ]=(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
feq[3 ]=(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
feq[4 ]=(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
feq[5 ]=(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
feq[6 ]=(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
feq[7 ]=(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
feq[8 ]=(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
feq[9 ]=(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
feq[10]=(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
feq[11]=(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
feq[12]=(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
feq[13]=(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
feq[14]=(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
feq[15]=(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
feq[16]=(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
feq[17]=(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
feq[18]=(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
if(SmagLES == 1)
{
float PI11 = (f[1 ]-feq[1 ])+(f[3 ]-feq[3 ])+(f[5 ]-feq[5 ])+
(f[6 ]-feq[6 ])+(f[7 ]-feq[7 ])+(f[8 ]-feq[8 ])+
(f[10]-feq[10])+(f[12]-feq[12])+(f[15]-feq[15])+
(f[17]-feq[17]);
float PI22 = (f[2 ]-feq[2 ])+(f[4 ]-feq[4 ])+(f[5 ]-feq[5 ])+
(f[6 ]-feq[6 ])+(f[7 ]-feq[7 ])+(f[8 ]-feq[8 ])+
(f[11]-feq[11])+(f[13]-feq[13])+(f[16]-feq[16])+
(f[18]-feq[18]);
float PI33 = (f[9 ]-feq[9 ])+(f[14]-feq[14])+(f[10]-feq[10])+
(f[12]-feq[12])+(f[15]-feq[15])+(f[17]-feq[17])+
(f[11]-feq[11])+(f[13]-feq[13])+(f[16]-feq[16])+
(f[18]-feq[18]);
float PI12 = (f[5 ]-feq[5 ])+(f[7 ]-feq[7 ])-(f[6 ]-feq[6 ])-(f[8 ]-feq[8 ]);
float PI13 = (f[10]-feq[10])+(f[17]-feq[17])-(f[12]-feq[12])-(f[15]-feq[15]);
float PI23 = (f[11]-feq[11])+(f[18]-feq[18])-(f[13]-feq[13])-(f[16]-feq[16]);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
float tau0 = 1.f/omega;
float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q);
omega = 1.f/tau;
}
f[0 ] -=omega*(f[0 ]-feq[0 ]);
f[1 ] -=omega*(f[1 ]-feq[1 ]);
f[2 ] -=omega*(f[2 ]-feq[2 ]);
f[3 ] -=omega*(f[3 ]-feq[3 ]);
f[4 ] -=omega*(f[4 ]-feq[4 ]);
f[5 ] -=omega*(f[5 ]-feq[5 ]);
f[6 ] -=omega*(f[6 ]-feq[6 ]);
f[7 ] -=omega*(f[7 ]-feq[7 ]);
f[8 ] -=omega*(f[8 ]-feq[8 ]);
f[9 ] -=omega*(f[9 ]-feq[9 ]);
f[10] -=omega*(f[10]-feq[10]);
f[11] -=omega*(f[11]-feq[11]);
f[12] -=omega*(f[12]-feq[12]);
f[13] -=omega*(f[13]-feq[13]);
f[14] -=omega*(f[14]-feq[14]);
f[15] -=omega*(f[15]-feq[15]);
f[16] -=omega*(f[16]-feq[16]);
f[17] -=omega*(f[17]-feq[17]);
f[18] -=omega*(f[18]-feq[18]);
AddForce(f,dpdy);
}
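/*
North/South/East/West_Extrap impose boundary conditions on the four lateral faces.
Each routine rebuilds the higher-order moments m[] from the current f's and then
reconstructs the distribution functions with the prescribed macroscopic values
substituted in: a given rho for the north/east outlets, a given v or u (with the
other components set to zero) for the south/west inlets.
*/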
inline __device__ void North_Extrap(float* f, float rho)
{
float m[19];
//rho = 1.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void South_Extrap(float* f, float v)
{
float m[19];
float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void East_Extrap(float* f, float rho)
{
float m[19];
//rho = 0.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void West_Extrap(float* f, float u, int t)
{
float m[19];
float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
//if(t == 1000 || t == 2000 || t == 3000) w = 0.01f;
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
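/*
xsymmetry_bot / xsymmetry_top appear to apply a symmetry (specular-reflection, i.e.
free-slip) condition on the x boundaries: _bot fills the +x populations from their -x
counterparts (the unknowns at x = 0), _top does the reverse at x = XDIM-1. The leading
branches patch the y and z domain edges and corners, where additional populations have
no upstream neighbour and are copied from the available known directions.
*/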
__device__ void xsymmetry_bot(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13]=f[18];
f[11]=f[18];
f[16]=f[18];
f[ 6] =f[ 7];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == 0 && z == ZDIM-1){
f[ 4] = f[ 2];
f[11]=f[13];
f[18]=f[13];
f[16]=f[13];
f[ 6] =f[ 7];
f[14]=f[ 9];
f[17]=f[12];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[11]=f[16];
f[18]=f[16];
f[13]=f[16];
f[ 7] =f[ 6];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[16]=f[11];
f[18]=f[11];
f[13]=f[11];
f[ 7] =f[ 6];
f[14]=f[ 9];
f[17]=f[12];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11]=f[13];
f[16]=f[18];
f[ 8] = f[ 5];
}
else if(y == YDIM-1){
f[ 4]=f[ 2] ;
f[13]=f[11];
f[18]=f[16];
f[ 5]=f[ 8] ;
}
}
f[ 1] = f[ 3] ;
f[ 5] = f[ 6] ;
f[ 8] = f[ 7] ;
f[10]= f[12];
f[15]= f[17];
}
__device__ void xsymmetry_top(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13] = f[18];
f[11] = f[18];
f[16] = f[18];
f[ 5] = f[ 8];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == 0 && z == ZDIM-1){
f[ 2] = f[ 4];
f[11] = f[13];
f[18] = f[13];
f[16] = f[13];
f[ 5] = f[ 8];
f[14] = f[ 9];
f[15] = f[10];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[18] = f[16];
f[11] = f[16];
f[13] = f[16];
f[ 8] = f[ 5];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[16] = f[11];
f[18] = f[11];
f[ 8] = f[ 5];
f[14] = f[ 9];
f[15] = f[10];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11] = f[13];
f[16] = f[18];
f[ 5] = f[ 8];
}
else if(y == YDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[18] = f[16];
f[ 8] = f[ 5];
}
}
f[ 3] = f[ 1] ;
f[ 6] = f[ 5] ;
f[ 7] = f[ 8] ;
f[12]= f[10];
f[17]= f[15];
}
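/*
vel_av / vel_fluc accumulate running time averages of the velocity components and of
the squared fluctuations about those averages, starting at START_VELAV and
START_VELFLUC respectively. The *LR variants do the same for (u, v) on the refined
grid, weighting each sample by LRFACTOR, presumably because the refined grid advances
with a proportionally smaller time step.
*/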
inline __device__ void vel_av(float* f, float& uAv, float& vAv, float& wAv, int t)
{
float u,v,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1);
vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1);
wAv = (wAv*(t-START_VELAV)+w)/((t-START_VELAV)+1);
}
inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
}
inline __device__ void vel_fluc(float* f, float& uAv,
float& vAv, float& wAv, float& ufluc, float& vfluc, float& wfluc, int t)
{
float u,v,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
w = (w-wAv)*(w-wAv);
ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1);
vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1);
wfluc = (wfluc*(t-START_VELFLUC)+w)/((t-START_VELFLUC)+1);
}
inline __device__ void vel_flucLR(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
}
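/*
initialize / initializeLR set the starting distribution functions on the coarse and
refined grids. Every node receives the equilibrium state (via bgk_meq followed by
InvertMoments) for rho = 1 and a uniform velocity (0, UMAX, 0); solid / bounce-back
nodes (im == 1 or im == 10) start at rest.
*/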
__global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcn(xcoord,ycoord,zcoord,0);
float u,v,w,rho;
rho = 1.f;
u = 0.0f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
bgk_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YDIM*zInner]=f[ i];
}
__global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
	//physical coordinates in the refined region
	float xcoord = LRX0+x*LRFACTOR;
	float ycoord = LRY0+y*LRFACTOR;
	float zcoord = LRZ0+LRFACTOR*(GPU_N*(zInner+2)+z);
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = 0.0f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
bgk_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YLRDIM*zInner]=f[ i];
}
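/*
update_top advances the top halo slice of a GPU's subdomain by one time step. It
streams from the halo buffer hA, the adjacent interior slice of fA, and the transfer
buffer temp (presumably populations received from the neighbouring subdomain). Solid
nodes (im == 1 or 10) get bounce-back, with the momentum exchange accumulated into
FX/FY/FZ[t-STARTF] when flag_F is set; the im codes 100/200/300/400 select the
outlet/inlet extrapolations, 25/26 the x-symmetry conditions, and 50-54 periodic
copies. Fluid nodes are collided with mrt_collide and written to hB, and when
REFINEMENT == 1 the node moments are stored in h_interp for the coarse-to-fine
interpolation.
*/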
__global__ void update_top(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* h_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1,t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_mem(14,x ,y ,pitch)];
f[15]= temp[buff_mem(15,x-1,y ,pitch)];
f[16]= temp[buff_mem(16,x ,y-1,pitch)];
f[17]= temp[buff_mem(17,x+1,y ,pitch)];
f[18]= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f[0 ];
hB[buff_mem(1 ,x,y,pitch)] = f[3 ];
hB[buff_mem(2 ,x,y,pitch)] = f[4 ];
hB[buff_mem(3 ,x,y,pitch)] = f[1 ];
hB[buff_mem(4 ,x,y,pitch)] = f[2 ];
hB[buff_mem(5 ,x,y,pitch)] = f[7 ];
hB[buff_mem(6 ,x,y,pitch)] = f[8 ];
hB[buff_mem(7 ,x,y,pitch)] = f[5 ];
hB[buff_mem(8 ,x,y,pitch)] = f[6 ];
hB[buff_mem(9 ,x,y,pitch)] = f[14];
hB[buff_mem(10,x,y,pitch)] = f[17];
hB[buff_mem(11,x,y,pitch)] = f[18];
hB[buff_mem(12,x,y,pitch)] = f[15];
hB[buff_mem(13,x,y,pitch)] = f[16];
hB[buff_mem(14,x,y,pitch)] = f[9 ];
hB[buff_mem(15,x,y,pitch)] = f[12];
hB[buff_mem(16,x,y,pitch)] = f[13];
hB[buff_mem(17,x,y,pitch)] = f[10];
hB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1);
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1);
West_Extrap(f,u_in,t);
}
if(im == 25)
xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1);
if(im == 26)
xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1);
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,XDIM-2,y,pitch)];
}
if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,1,y,pitch)];
}
if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,DYNY1-1,pitch)];
}
if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,DYNY2,pitch)];
}
if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,1,pitch)];
}
for(int i = 0; i<19; i++)
hB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
// //float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[19];
Moments(f,mom);
for(int i = 0; i<19; i++)
h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
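/*
update_bot is the counterpart of update_top for the bottom halo slice: it streams from
gA, the first interior slice of fA and the transfer buffer temp, applies the same
boundary handling and MRT collision, and writes the result to gB (and to g_interp when
REFINEMENT == 1).
*/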
__global__ void update_bot(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2),t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)];
f[10]= temp[buff_mem(10,x-1,y ,pitch)];
f[11]= temp[buff_mem(11,x ,y-1,pitch)];
f[12]= temp[buff_mem(12,x+1,y ,pitch)];
f[13]= temp[buff_mem(13,x ,y+1,pitch)];
f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)];
f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)];
f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)];
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)];
f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f[0 ];
gB[buff_mem(1 ,x,y,pitch)] = f[3 ];
gB[buff_mem(2 ,x,y,pitch)] = f[4 ];
gB[buff_mem(3 ,x,y,pitch)] = f[1 ];
gB[buff_mem(4 ,x,y,pitch)] = f[2 ];
gB[buff_mem(5 ,x,y,pitch)] = f[7 ];
gB[buff_mem(6 ,x,y,pitch)] = f[8 ];
gB[buff_mem(7 ,x,y,pitch)] = f[5 ];
gB[buff_mem(8 ,x,y,pitch)] = f[6 ];
gB[buff_mem(9 ,x,y,pitch)] = f[14];
gB[buff_mem(10,x,y,pitch)] = f[17];
gB[buff_mem(11,x,y,pitch)] = f[18];
gB[buff_mem(12,x,y,pitch)] = f[15];
gB[buff_mem(13,x,y,pitch)] = f[16];
gB[buff_mem(14,x,y,pitch)] = f[9 ];
gB[buff_mem(15,x,y,pitch)] = f[12];
gB[buff_mem(16,x,y,pitch)] = f[13];
gB[buff_mem(17,x,y,pitch)] = f[10];
gB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2));
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,GPU*(zInner+2));
West_Extrap(f,u_in,t);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2));
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2));
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,XDIM-2,y,pitch)];
}
if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,1,y,pitch)];
}
if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,DYNY1-1,pitch)];
}
if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,DYNY2,pitch)];
}
if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,1,pitch)];
}
for(int i = 0; i<19; i++)
gB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[19];
Moments(f,mom);
for(int i = 0; i<19; i++)
g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
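/*
update_inn advances the interior slices (z = 0 .. zInner-1) of a GPU's subdomain.
Slices adjacent to the halos read their z-directed populations from the g (bottom) or
h (top) buffers; all other slices stream entirely within fA. Besides the boundary
handling and MRT collision shared with update_top/update_bot, this kernel also updates
the running velocity averages and fluctuations when VELAV == 1.
*/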
__global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velAv_w, float* velFluc_u, float* velFluc_v, float* velFluc_w, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z,t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_mem(14,x ,y ,pitch)];
f[15]= h [buff_mem(15,x-1,y ,pitch)];
f[16]= h [buff_mem(16,x ,y-1,pitch)];
f[17]= h [buff_mem(17,x+1,y ,pitch)];
f[18]= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_mem(9 ,x ,y ,pitch)];
f[10]= g [buff_mem(10,x-1,y ,pitch)];
f[11]= g [buff_mem(11,x ,y-1,pitch)];
f[12]= g [buff_mem(12,x+1,y ,pitch)];
f[13]= g [buff_mem(13,x ,y+1,pitch)];
f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_mem(10,x,y,z,pitch,zInner)] = f[17];
fB[f_mem(11,x,y,z,pitch,zInner)] = f[18];
fB[f_mem(12,x,y,z,pitch,zInner)] = f[15];
fB[f_mem(13,x,y,z,pitch,zInner)] = f[16];
fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f[12];
fB[f_mem(16,x,y,z,pitch,zInner)] = f[13];
fB[f_mem(17,x,y,z,pitch,zInner)] = f[10];
fB[f_mem(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z);
South_Extrap(f,u_in);
}
if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)];
East_Extrap(f,1.0f);
}
if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)];
float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z);
West_Extrap(f,u_in,t);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2)+1+z);
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2)+1+z);
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,XDIM-2,y,z,pitch,zInner)];
}
if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,1,y,z,pitch,zInner)];
}
if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,DYNY1-1,z,pitch,zInner)];
}
if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,DYNY2,z,pitch,zInner)];
}
if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,1,z,pitch,zInner)];
}
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
float w_Av = velAv_w[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f,u_Av,v_Av,w_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
velAv_w[x+y*pitch+(z+1)*pitch*YDIM] = w_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
float w_Av = velAv_w[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM];
float w_fluc = velFluc_w[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f,u_Av,v_Av,w_Av,u_fluc,v_fluc,w_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
velFluc_w[x+y*pitch+(z+1)*pitch*YDIM] = w_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[19];
Moments(f,mom);
for(int i = 0; i<19; i++)
f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
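/*
update_top_LR / update_bot_LR / update_inn_LR are the refined-grid versions of the
update kernels. Geometry is evaluated with ImageFcnLR at the physical coordinates
LRX0/LRY0/LRZ0 + LRFACTOR*(index); only bounce-back and plain MRT collision are
handled, presumably because the refined region does not touch the inlet/outlet or
periodic boundaries, and the body force dpdy is scaled by LRFACTOR for the finer
lattice spacing.
*/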
__global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_memLR(14,x ,y ,pitch)];
f[15]= temp[buff_memLR(15,x-1,y ,pitch)];
f[16]= temp[buff_memLR(16,x ,y-1,pitch)];
f[17]= temp[buff_memLR(17,x+1,y ,pitch)];
f[18]= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[14];
hB[buff_memLR(10,x,y,pitch)] = f[17];
hB[buff_memLR(11,x,y,pitch)] = f[18];
hB[buff_memLR(12,x,y,pitch)] = f[15];
hB[buff_memLR(13,x,y,pitch)] = f[16];
hB[buff_memLR(14,x,y,pitch)] = f[9 ];
hB[buff_memLR(15,x,y,pitch)] = f[12];
hB[buff_memLR(16,x,y,pitch)] = f[13];
hB[buff_memLR(17,x,y,pitch)] = f[10];
hB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,dpdy*LRFACTOR);
for(int i = 0; i<19; i++)
hB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
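/* update_bot_LR: bottom halo slice of the refined grid; see the note above update_top_LR. */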
__global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
//float zcoord = LRZ0+GPU*LRFACTOR*z;
float zcoord = LRZ0+LRFACTOR*(GPU*(zInner+2)-1);
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)];
f[10]= temp[buff_memLR(10,x-1,y ,pitch)];
f[11]= temp[buff_memLR(11,x ,y-1,pitch)];
f[12]= temp[buff_memLR(12,x+1,y ,pitch)];
f[13]= temp[buff_memLR(13,x ,y+1,pitch)];
f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)];
f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)];
f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)];
f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)];
f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[14];
gB[buff_memLR(10,x,y,pitch)] = f[17];
gB[buff_memLR(11,x,y,pitch)] = f[18];
gB[buff_memLR(12,x,y,pitch)] = f[15];
gB[buff_memLR(13,x,y,pitch)] = f[16];
gB[buff_memLR(14,x,y,pitch)] = f[9 ];
gB[buff_memLR(15,x,y,pitch)] = f[12];
gB[buff_memLR(16,x,y,pitch)] = f[13];
gB[buff_memLR(17,x,y,pitch)] = f[10];
gB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,dpdy*LRFACTOR);
for(int i = 0; i<19; i++)
gB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
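/*
update_inn_LR: interior slices of the refined grid. Like update_inn it pulls the
z-directed populations from the g/h buffers on the first and last interior slice, and
it accumulates the (u, v) running averages and fluctuations via vel_avLR / vel_flucLR.
*/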
__global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_memLR(14,x ,y ,pitch)];
f[15]= h [buff_memLR(15,x-1,y ,pitch)];
f[16]= h [buff_memLR(16,x ,y-1,pitch)];
f[17]= h [buff_memLR(17,x+1,y ,pitch)];
f[18]= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)];
f[10]= g [buff_memLR(10,x-1,y ,pitch)];
f[11]= g [buff_memLR(11,x ,y-1,pitch)];
f[12]= g [buff_memLR(12,x+1,y ,pitch)];
f[13]= g [buff_memLR(13,x ,y+1,pitch)];
f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
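//accumulate the momentum exchange at this boundary node for the force calculation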
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,dpdy*LRFACTOR);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
/*
InterpCF is used on the LR grid. It first uses a subset of its threads to read the coarse-mesh nodes that completely envelop the fine-mesh nodes and load their f's into shared memory. Next, all threads use the shared-memory data to interpolate and scale the f's.
*/
__global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int GPU, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
__shared__ float mom_c[BLOCKSIZEINTERP][2][2][19];
__shared__ float S_c[BLOCKSIZEINTERP][2][2][6];
//int GPU = 0;
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner_f+2)+z));
if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and g_temp
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use h and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner];
}
else{
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo
int ymax = YLRDIM*LRFACTOR+1;
for(int i = 0; i<19; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner];
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
syncthreads();
if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){
//if(x<LRLEVEL+3 || x>XLRDIM-LRLEVEL-5 || y<LRLEVEL+3 || y>YLRDIM-LRLEVEL-5){
//interpolate from shared mem
int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f);
int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f);
int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f);
int xp = xm+1; //int yp = ym+1; int zp = zm+1;
float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm;
float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym;
float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm;
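//trilinearly interpolate the 19 moments from the 8 surrounding coarse-mesh nodes held in shared memory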
float mom[19];
for(int i = 0; i<19; i++){
float v000 = mom_c[xm][0][0][i];
float v001 = mom_c[xp][0][0][i];
float v010 = mom_c[xm][1][0][i];
float v011 = mom_c[xp][1][0][i];
float v100 = mom_c[xm][0][1][i];
float v101 = mom_c[xp][0][1][i];
float v110 = mom_c[xm][1][1][i];
float v111 = mom_c[xp][1][1][i];
mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
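//ORDER == 2: add quadratic correction terms, built from the coarse-mesh strain rates, to the momentum moments mom[3], mom[5] and mom[7]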
if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5];
u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5];
u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5];
u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5];
u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5];
u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5];
u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5];
u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom_c[xm][0][0][3];m05=mom_c[xm][0][0][5];m07=mom_c[xm][0][0][7];
m13=mom_c[xp][0][0][3];m15=mom_c[xp][0][0][5];m17=mom_c[xp][0][0][7];
m23=mom_c[xm][1][0][3];m25=mom_c[xm][1][0][5];m27=mom_c[xm][1][0][7];
m33=mom_c[xp][1][0][3];m35=mom_c[xp][1][0][5];m37=mom_c[xp][1][0][7];
m43=mom_c[xm][0][1][3];m45=mom_c[xm][0][1][5];m47=mom_c[xm][0][1][7];
m53=mom_c[xp][0][1][3];m55=mom_c[xp][0][1][5];m57=mom_c[xp][0][1][7];
m63=mom_c[xm][1][1][3];m65=mom_c[xm][1][1][5];m67=mom_c[xm][1][1][7];
m73=mom_c[xp][1][1][3];m75=mom_c[xp][1][1][5];m77=mom_c[xp][1][1][7];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
float xpr = 4.f*xf*xf-4.f*xf+1.f;
float ypr = 4.f*yf*yf-4.f*yf+1.f;
float zpr = 4.f*zf*zf-4.f*zf+1.f;
mom[3] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[5] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[7] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
float f[19];
//InvertPhysicalMoments(f,mom,SF);
//InvertPhysicalMoments_LES_cf(f,mom,SF,omega_c);
ScaleMoments_bgk(mom,SF);
// mom[0] = 2.f;
// mom[3] = 0.1f;
// mom[5] = 0.1f;
// mom[7] = 0.1f;
InvertMoments(f,mom);
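//write the reconstructed distributions to the fine mesh: bottom halo buffer g_f, top halo buffer h_f, or interior array f_f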
if(im != 1 && im != 10){
if(z==0){
for(int i = 0; i<19; i++){
g_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else if(z==gridDim.z*blockDim.z-1){
for(int i = 0; i<19; i++){
h_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else{
for(int i = 0; i<19; i++){
f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i];
}
}
}
}
}
__global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int GPU, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
//if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
//(true))
//if( (x > LRX0+5 && x < LRX0+XLRDIM*LRFACTOR-6 && y > LRY0+5 && y < LRY0+YLRDIM*LRFACTOR-6) &&
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
(true))
{
float f[19];
float mom[8][19];//physical moments of 8 neighboring nodes
float S_f[8][6];//strain rate tensor of 8 neighboring nodes
int xm = LRLEVEL*(x-LRX0);
int ym = LRLEVEL*(y-LRY0);
int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
//top nodes. interp between h and h_temp. output to h
if(z == zInner+1)
{
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)];
Moments(f,mom[0]);
StrainRate(S_f[0],mom[0],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)];
Moments(f,mom[1]);
StrainRate(S_f[1],mom[1],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)];
Moments(f,mom[2]);
StrainRate(S_f[2],mom[2],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)];
Moments(f,mom[3]);
StrainRate(S_f[3],mom[3],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)];
Moments(f,mom[4]);
StrainRate(S_f[4],mom[4],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)];
Moments(f,mom[5]);
StrainRate(S_f[5],mom[5],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)];
Moments(f,mom[6]);
StrainRate(S_f[6],mom[6],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)];
Moments(f,mom[7]);
StrainRate(S_f[7],mom[7],1.f);
}
//inner nodes. output to g or f
else{
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)];
Moments(f,mom[0]);
StrainRate(S_f[0],mom[0],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)];
Moments(f,mom[1]);
StrainRate(S_f[1],mom[1],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)];
Moments(f,mom[2]);
StrainRate(S_f[2],mom[2],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)];
Moments(f,mom[3]);
StrainRate(S_f[3],mom[3],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)];
Moments(f,mom[4]);
StrainRate(S_f[4],mom[4],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)];
Moments(f,mom[5]);
StrainRate(S_f[5],mom[5],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)];
Moments(f,mom[6]);
StrainRate(S_f[6],mom[6],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)];
Moments(f,mom[7]);
StrainRate(S_f[7],mom[7],1.f);
}
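//combine the moments of the 8 fine-mesh nodes: plain average for ORDER == 1, average plus gradient-based correction for ORDER == 2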
if(ORDER == 1){
for(int i = 0; i<19; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
}
else if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5];
u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5];
u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5];
u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5];
u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5];
u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5];
u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5];
u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom[0][3];m05=mom[0][5];m07=mom[0][7];
m13=mom[1][3];m15=mom[1][5];m17=mom[1][7];
m23=mom[2][3];m25=mom[2][5];m27=mom[2][7];
m33=mom[3][3];m35=mom[3][5];m37=mom[3][7];
m43=mom[4][3];m45=mom[4][5];m47=mom[4][7];
m53=mom[5][3];m55=mom[5][5];m57=mom[5][7];
m63=mom[6][3];m65=mom[6][5];m67=mom[6][7];
m73=mom[7][3];m75=mom[7][5];m77=mom[7][7];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
for(int i = 0; i<19; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f;
float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f;
float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f;
mom[0][3] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[0][5] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[0][7] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
//InvertPhysicalMoments(f,mom[0],SF);
//InvertPhysicalMoments_LES_fc(f,mom[0],SF,omega_f);
ScaleMoments_bgk(mom[0],SF);
InvertMoments(f,mom[0]);
//for(int i = 0; i<19; i++) f[i] = 0.1f;
//int GPU = 0;
int im = ImageFcn(x,y,GPU*(zInner+2)+z,0);
if(im != 1 && im != 10){
if(z == 0){
for(int i = 0; i<19; i++)
g_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
h_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else{
for(int i = 0; i<19; i++)
f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i];
}
}
}//end extraction region
}
__global__ void AverageV(float* fA, float* gA, float* hA, size_t pitch, int GPU, int zInner, float* Av_V, int t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float f[19];
float v_av = 0;
__shared__ float sumV[BLOCKSIZEX];
syncthreads();
if(z == 0){
for(int i = 0; i<19; i++)
f[i] = gA[buff_mem(i,x,DYNY1,pitch)];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
f[i] = hA[buff_mem(i,x,DYNY1,pitch)];
}
else{
for(int i = 0; i<19; i++)
f[i] = fA[f_mem(i,x,DYNY1,z-1,pitch,zInner)];
}
sumV[threadIdx.x] = f[2]-f[4]+f[5]+f[6]-f[7]-f[8]+f[11]-f[13]+f[16]-f[18];
syncthreads();
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumV[threadIdx.x] += sumV[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&Av_V[t],sumV[0]);
}
}
void WriteResults(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
if(GPU == 0){
outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n";
outputslice<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<1<<"\n";
}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<","<<velAv[2][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<","<<velFluc[2][i+j*XDIM]<<","<<0<<endl;
}}
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18];
//float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18];
float m10 =-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17];
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
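//subtract the equilibrium parts from the second-order moments, leaving the non-equilibrium stress used to compute Smag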
m[9] -= 2.f*u*u-(v*v+w*w);
m[11]-= v*v-w*w;
m[13]-= u*v;
m[14]-= v*w;
m[15]-= u*w;
float PI11 = -0.5f *(m[ 9]);
float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f;
float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f;
float PI12 = -1.5f*m[13];
float PI23 = -1.5f*m[14];
float PI13 = -1.5f*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//InvertMoments_host(f,m);
//u = m[3];
//v = m[5];
//w = m[7];
//m6 = m[6 ];
//m10= m[10];
//m16= m[16];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "<<velAv[2][i+j*XDIM+k*XDIM*YDIM]<<", "
//<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
if(k == 1 && GPU == 0){
outputslice<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "<<velAv[2][i+j*XDIM+k*XDIM*YDIM]<<","
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
}
}}}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[2][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[2][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<0<<endl;
}}
}
void WriteResultsLR(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n";
if(GPU == 0){
outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"velAv[2]\",\"ufluc\",\"vfluc\",\"wfluc\",\"Smag\"\n";
outputslice<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<1<<"\n";
}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<","<<velAv[2][i+j*XLRDIM]
<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<","<<velFluc[2][i+j*XLRDIM]
<<","<<0<<endl;
}}
for(int k = 1; k<ZLRDIM/GPU_N-1; k++){
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k);
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
m[9] -= 2.f*u*u-(v*v+w*w);
m[11]-= v*v-w*w;
m[13]-= u*v;
m[14]-= v*w;
m[15]-= u*w;
float PI11 = -0.5f *(m[ 9]);
float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f;
float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f;
float PI12 = -1.5f*m[13];
float PI23 = -1.5f*m[14];
float PI13 = -1.5f*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13))/LRFACTOR;
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "<<velAv [2][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[2][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
if(k == 3 && GPU == 0){
outputslice<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "<<velAv [2][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[2][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
}
}}}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "<<velAv[2][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[2][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<0<<endl;
}}
}
void WriteForces(float **F, ofstream &output, int ForceTime, int level)
{
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl;
}
}
void WriteAvV(float *v, ofstream &output)
{
for(int i = 0; i<TMAX; i++){
output<<i<<", "<<v[i]/(XDIM-2)/ZDIM<<endl;
}
}
void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"DPDY \t: "<<DPDY<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinsky LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
}
int main(int argc, char *argv[])
{
int GPU_N; cudaGetDeviceCount(&GPU_N);
GPU_N=NUMGPU;
cout<<"number of GPUs: "<<GPU_N<<endl;
ofstream output; ofstream outputForce; ofstream outputInputs; ofstream outputAvV;
string FileName = CASENAME;
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
outputAvV.open ((FileName+".vel").c_str());
ofstream outputpart[REFINEMENT*GPU_N+GPU_N], outputslice;
for(int i = 0; i< REFINEMENT*GPU_N+GPU_N; i++){
//string filenum = to_string(i);
char str[10];
snprintf(str,10,"%i",i);
outputpart[i].open ((FileName+"_part"+str+".dat").c_str());
}
outputslice.open ((FileName+"_slice.dat").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);//pitch*sizeof(float);
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
float CharLength = OBSTR1*2.f;
float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
if(LRFACTOR == 0.125f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
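//scale factors for the non-equilibrium moments when passing data from coarse to fine (SF_cf) and from fine to coarse (SF_fc)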
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
cout<<SF_cf<<endl;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
WriteInputs(cout,omega,omegaLR,GPU_N);
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 AvV_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),1,(ZDIM/GPU_N)/BLOCKSIZEZ);
cudaStream_t stream_halo[GPU_N];
cudaStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *F_h[GPU_N][3];
float *F_d[GPU_N][3];
float *F_total[3];
float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3];
float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3];
float *Av_V_h[GPU_N];
float *Av_V_d[GPU_N];
float dpdy = DPDY;
for(int i = 0; i<3; i++)
F_total[i] = (float *)malloc(ForceTime*sizeof(float));
for(int i=0;i<3;i++)
for(int j=0;j<(ForceTime);j++)
F_total[i][j] = 0;
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
F_h [n][i] = (float *)malloc(ForceTime*sizeof(float));
velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
}
Av_V_h[n] = (float *)malloc(TMAX*sizeof(float));
cudaSetDevice(n);
cudaStreamCreate(&stream_halo[n]);
cudaStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++)
if(m != n) cudaDeviceEnablePeerAccess(m,0);
for(int i = 0; i<2; i++){
cudaMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float));
cudaMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float));
}
cudaMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
cudaMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float));
cudaMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
}
cudaMalloc((void **) & Av_V_d[n],TMAX*sizeof(float));
//initialize host f_inner
for (int i = 0; i < XDIM*YDIM*zInner*19; i++)
f_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for(int j=0;j<(ForceTime);j++)
F_h[n][i][j] = 0;
for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){
velAv_h [n][i][j] = 0;
velFluc_h[n][i][j] = 0;
}
}
for(int j=0;j<TMAX;j++)
Av_V_h[n][j] = 0;
for(int i = 0; i<2; i++){
cudaMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice);
cudaMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
cudaMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
}
cudaMemcpy(Av_V_d[n],Av_V_h[n],sizeof(float)*(TMAX),cudaMemcpyHostToDevice);
//initialization kernels
for(int i = 0; i<2; i++){
initialize<<< grid,threads>>>(f_d[n][i],pitch_e,zInner,GPU_N);
initialize<<<g_grid,threads>>>(g_d[n][i],pitch_e, 1,GPU_N);
initialize<<<g_grid,threads>>>(h_d[n][i],pitch_e, 1,GPU_N);
}
initialize<<<g_grid,threads>>>(g_temp[n],pitch_e, 1,GPU_N);
initialize<<<g_grid,threads>>>(h_temp[n],pitch_e, 1,GPU_N);
}//end Malloc and Initialize
//data pointers as 3D array (GPUxCoord)
float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3];
float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3];
float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N];
float *interp_h[GPU_N];
size_t pitchLR = 2;
while(pitchLR<XLRDIM)
pitchLR=pitchLR*2;
pitchLR = pitchLR*sizeof(float);
size_t pitchLR_e = pitchLR/sizeof(float);
cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl;
size_t pitchInterp = 2;
while(pitchInterp<XLRDIM*LRFACTOR+1)
pitchInterp=pitchInterp*2;
pitchInterp = pitchInterp*sizeof(float);
size_t pitchInterp_e = pitchInterp/sizeof(float);
cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL);
dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N);
cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl;
dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ);
//setup LR
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*19*sizeof(float));
for(int i = 0; i<3; i++){
velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
cudaSetDevice(n);
for(int i = 0; i<2; i++){
cudaMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float));
cudaMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
}
cudaMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*19*sizeof(float));
cudaMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*19*sizeof(float));
cudaMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*19*sizeof(float));
cudaMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*19*sizeof(float));
cudaMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*19*sizeof(float));
for(int i = 0; i<3; i++){
cudaMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_LR_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){
velAv_LR_h [n][i][j] = 0;
velFluc_LR_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
cudaMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice);
cudaMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
cudaMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
initializeLR<<< LR_grid,LR_threads>>>(f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_d[n][i],pitchLR_e, 1,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_d[n][i],pitchLR_e, 1,GPU_N);
}
initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_temp[n],pitchLR_e, 1,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_temp[n],pitchLR_e, 1,GPU_N);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
cudaFuncSetCacheConfig(InterpCF,cudaFuncCachePreferShared);
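//A/B and C/D select the ping-pong distribution buffers on the coarse and refined meshes, respectively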
int A = 0; int B = 1; int C = 0; int D = 1;
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
size_t mem_avail, mem_total;
cudaMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n";
cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n";
}
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t++)
{
//copy temporary arrays for the top and bottom of the coarse mesh to the neighboring GPUs (all 19 distributions are transferred)
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&h_temp[n][0],n,&g_d[ (n+1)%GPU_N][A][0], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_temp[n][0],n,&h_d[abs(n-1)%GPU_N][A][0],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*19,stream_halo[n]);
//compute inner nodes on coarse mesh
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inn<<<grid,threads,0,stream_inner[n]>>>(f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velAv_d[n][2],velFluc_d[n][0],velFluc_d[n][1],velFluc_d[n][2],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e,dpdy);
}
//synchronize halo stream before computing top and bottom nodes
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
//compute top and bottom nodes
for(int n = 0; n<GPU_N; n++)
{
cudaSetDevice(n);
update_top<<<g_grid, threads, 0, stream_halo [n]>>>(h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e,dpdy);
update_bot<<<g_grid, threads, 0, stream_halo [n]>>>(g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e,dpdy);
}
if(t%100 == 0 && t>1000)
{
for(int n = 0; n<GPU_N; n++)
cudaDeviceSynchronize();
for(int n = 0; n<GPU_N; n++)
{
AverageV<<<AvV_grid, threads>>>(f_d[n][B],g_d[n][B],h_d[n][B],pitch_e,n,zInner,Av_V_d[n],t);
}
for(int n = 0; n<GPU_N; n++)
cudaMemcpy(&Av_V_h[n][t],&Av_V_d[n][t],sizeof(float),cudaMemcpyDeviceToHost);
float Av_V = 0;
for(int n = 0; n<GPU_N; n++)
Av_V += Av_V_h[n][t];
Av_V /= (XDIM-2)*ZDIM;
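//proportional adjustment of the driving pressure gradient based on the deviation of the bulk velocity from UMAX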
float diff;
diff = (Av_V-UMAX)/UMAX;
dpdy += diff*KP*abs(DPDY);
//dpdy = max(DPDY*)
// if(Av_V < UMAX*0.995f)
// dpdy *= 1.01f;
// else if(Av_V > UMAX*1.005f)
// dpdy *= 0.99f;
if(t%1000 == 0) outputAvV<<t<<", "<<Av_V<<", "<<dpdy<<endl;
}
//cudaDeviceSynchronize();
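//swap the ping-pong buffers on the coarse mesh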
swap(A,B);
if(REFINEMENT == 1){
int flag_F = 0;
for(int i = 0; i<LRLEVEL; i++){
if(t>STARTF && i == 0) flag_F = 1;
else flag_F = 0;
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inn_LR<<<LR_grid,LR_threads,0,stream_inner[n]>>>(f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F,dpdy);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_top_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F,dpdy);
update_bot_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F,dpdy);
}
if(i == LRLEVEL-1)
{
//for(int n = 0; n<GPU_N; n++)
//	cudaMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
flag_F = 0;
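//swap the ping-pong buffers on the refined mesh for the next sub-step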
swap(C,D);
}
//interp from coarse grid
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
InterpCF<<<Interp_grid,Interp_threads,0,stream_inner[n]>>>(f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,n,zInner,zLRInner);
//cudaDeviceSynchronize();
}
//interp from fine grid
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
InterpFC<<<Interp_grid_c,threads,0,stream_halo[n]>>>(f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,n,zInner,zLRInner);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
}//end time loop
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
cudaMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
}
cudaMemcpy(Av_V_h[n],Av_V_d[n],sizeof(float)*TMAX,cudaMemcpyDeviceToHost);
WriteResults(outputpart[n],outputslice,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n);
outputpart[n]<<endl;
for(int i=0;i<3;i++)
for(int j=0;j<ForceTime;j++)
F_total[i][j] += F_h[n][i][j];
if(n > 0){
for(int j=0;j<TMAX;j++)
Av_V_h[0][j] += Av_V_h[n][j];
}
for(int i = 0; i<2; i++){
cudaFree(f_d[n][i]);
cudaFree(g_d[n][i]);
cudaFree(h_d[n][i]);
}
//f_d[n], g_d[n], h_d[n] are host-side arrays of device pointers; their device allocations were freed above
cudaFree(g_temp[n]);
cudaFree(h_temp[n]);
for(int i=0;i<3;i++)
cudaFree(F_d[n][i]);
//F_d[n] is a host-side array of device pointers; its device allocations were freed above
}//end Memcpy and write results
WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
//WriteAvV(Av_V_h[0],outputAvV);
if(REFINEMENT == 1){
// output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
// output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost);
//cudaMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,cudaMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
cudaMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
}
WriteResultsLR(outputpart[GPU_N+n],outputslice,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n);
outputpart[GPU_N+n]<<endl;
for(int i = 0; i<2; i++){
cudaFree(f_LR_d[n][i]);
cudaFree(g_LR_d[n][i]);
cudaFree(h_LR_d[n][i]);
}
//f_LR_d[n], g_LR_d[n], h_LR_d[n] are host-side arrays of device pointers; their device allocations were freed above
cudaFree(g_LR_temp[n]);
cudaFree(h_LR_temp[n]);
}
}
return 0;
}
|
a8caa99f231d0d0ae4653530a5a3b3e9b52d4a45.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#include <magma_v2.h>
#include "magma_common_device.cuh"
#include "grad_device.cuh"
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ CeedScalar shared_data[];
template<typename T, int NCOMP, int P, int Q, int MAXPQ>
static __global__ void
magma_gradn_2d_kernel(
const T *dinterp1d, const T *dgrad1d, magma_trans_t transT,
const T *dU, const int estrdU, const int cstrdU, const int dstrdU,
T *dV, const int estrdV, const int cstrdV, const int dstrdV, const int nelem)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int elem_id = (blockIdx.x * blockDim.y) + ty;
if (elem_id >= nelem) return;
T rU[1][NCOMP][P] = { make_zero<T>() }; // here DIMU = 1, but might be different for a fused operator
T rV[1][NCOMP][Q] = { make_zero<T>() }; // here DIMV = 1, but might be different for a fused operator
T rTmp = make_zero<T>();
// shift global memory pointers by elem stride
dU += elem_id * estrdU;
dV += elem_id * estrdV;
// assign shared memory pointers
T* sTinterp = (T*)(shared_data);
T* sTgrad = sTinterp + P*Q;
T* sTmp = sTgrad + P*Q;
sTmp += ty * (P * MAXPQ);
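// each element (indexed by ty) gets its own P x MAXPQ scratch area in shared memory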
// read T
if (ty == 0) {
dread_T_gm2sm<P, Q>(tx, transT, dinterp1d, sTinterp);
dread_T_gm2sm<P, Q>(tx, transT, dgrad1d, sTgrad);
}
// No need to read V ( required only in transposed grad )
const T beta = make_zero<T>();
/* read U (idim = 0 for dU, iDIM = 0 for rU) --
there is a sync at the end of this function */
readU_2d<T, P, 1, NCOMP, P, 0>
(dU + (0*dstrdU), cstrdU, rU, sTmp, tx);
/* first call (iDIM = 0, iDIMU = 0, iDIMV = 0) --
output from rV[0][][] into dV (idim = 0) */
magma_grad_2d_device<T, 1, 1, NCOMP, P, Q, P, Q, 0, 0, 0>
(sTinterp, sTgrad, rU, rV, beta, tx, rTmp, sTmp);
/* there is a sync at the end of magma_grad_2d_device */
writeV_2d<T, Q, 1, NCOMP, Q, 0>
(dV+(0*dstrdV), cstrdV, rV, tx);
/* second call (iDIM = 1, iDIMU = 0, iDIMV = 0) --
output from rV[0][][] into dV (idim = 1) */
magma_grad_2d_device<T, 1, 1, NCOMP, P, Q, P, Q, 1, 0, 0>
(sTinterp, sTgrad, rU, rV, beta, tx, rTmp, sTmp);
/* there is a sync at the end of magma_grad_2d_device */
writeV_2d<T, Q, 1, NCOMP, Q, 0>
(dV+(1*dstrdV), cstrdV, rV, tx);
}
//////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int NCOMP, int P, int Q>
static magma_int_t
magma_gradn_2d_kernel_driver(
const T *dinterp1d, const T *dgrad1d, magma_trans_t transT,
const T *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
T *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_device_t device;
magma_getdevice( &device );
magma_int_t shmem_max, nthreads_max;
const int MAXPQ = maxpq(P,Q);
magma_int_t nthreads = MAXPQ;
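// ntcol = number of elements handled per thread block (one element per ty slice), limited by maxthreads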
magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
magma_int_t shmem = 0;
shmem += sizeof(T) * 2*P*Q; // for sTinterp and sTgrad
shmem += sizeof(T) * ntcol * (P*MAXPQ); // for reforming rU we need PxP, and for the intermediate output we need PxQ
hipDeviceGetAttribute (&nthreads_max, hipDeviceAttributeMaxThreadsPerBlock, device);
#if TORCH_HIP_VERSION >= 9000
hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeSharedMemPerBlockOptin, device);
if (shmem <= shmem_max) {
hipFuncSetAttribute(magma_gradn_2d_kernel<T,NCOMP,P,Q,MAXPQ>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem);
}
#else
hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeMaxSharedMemoryPerBlock, device);
#endif // TORCH_HIP_VERSION >= 9000
if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
return 1; // launch failed
}
else {
magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
dim3 threads(nthreads, ntcol, 1);
dim3 grid(nblocks, 1, 1);
// IMPORTANT: we instantiate with DIM=1 instead of DIM=2 because the kernel handles one dimension at a time
// We should instantiate with DIM >= 1 when we fuse the whole operator, because of the q-function
hipLaunchKernelGGL(( magma_gradn_2d_kernel<T,NCOMP,P,Q,MAXPQ>), dim3(grid), dim3(threads), shmem, magma_queue_get_cuda_stream(queue),
dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem);
return (hipPeekAtLastError() == hipSuccess) ? 0 : 1;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P, int Q>
static magma_int_t
magma_gradn_2d_ncomp(
magma_int_t ncomp,
const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (ncomp) {
case 1:
launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,1,P,Q>
(dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,2,P,Q>
(dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,3,P,Q>
(dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P>
static magma_int_t
magma_gradn_2d_ncomp_q(
magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (Q) {
case 1:
launch_failed = magma_gradn_2d_ncomp<P, 1>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_gradn_2d_ncomp<P, 2>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_gradn_2d_ncomp<P, 3>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_gradn_2d_ncomp<P, 4>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_gradn_2d_ncomp<P, 5>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_gradn_2d_ncomp<P, 6>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_gradn_2d_ncomp<P, 7>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_gradn_2d_ncomp<P, 8>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_gradn_2d_ncomp<P, 9>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_gradn_2d_ncomp<P,10>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
static magma_int_t
magma_gradn_2d_ncomp_q_p(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (P) {
case 1:
launch_failed = magma_gradn_2d_ncomp_q< 1>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_gradn_2d_ncomp_q< 2>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_gradn_2d_ncomp_q< 3>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_gradn_2d_ncomp_q< 4>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_gradn_2d_ncomp_q< 5>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_gradn_2d_ncomp_q< 6>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_gradn_2d_ncomp_q< 7>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_gradn_2d_ncomp_q< 8>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_gradn_2d_ncomp_q< 9>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_gradn_2d_ncomp_q<10>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_gradn_2d(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, CeedTransposeMode tmode,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans;
launch_failed = magma_gradn_2d_ncomp_q_p(
P, Q, ncomp,
dinterp1d, dgrad1d, transT,
dU, estrdU, cstrdU, dstrdU,
dV, estrdV, cstrdV, dstrdV,
nelem, maxthreads, queue);
return launch_failed;
}
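// Editor's note (illustrative comments, not part of the upstream file): the three
// switch layers above convert the runtime triple (P, Q, ncomp) into a compile-time
// template instantiation. For example, a hypothetical call
//   magma_gradn_2d(/*P=*/2, /*Q=*/3, /*ncomp=*/1,
//                  dinterp1d, dgrad1d, CEED_NOTRANSPOSE,
//                  dU, estrdU, cstrdU, dstrdU,
//                  dV, estrdV, cstrdV, dstrdV,
//                  nelem, maxthreads, queue);
// resolves to magma_gradn_2d_kernel_driver<CeedScalar,1,2,3>. A return value of 1
// means either that (P, Q, ncomp) fell outside the instantiated range or that the
// driver's thread-count / shared-memory check rejected the launch.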
|
a8caa99f231d0d0ae4653530a5a3b3e9b52d4a45.cu
|
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <cuda.h> // for CUDA_VERSION
#include <magma_v2.h>
#include "magma_common_device.cuh"
#include "grad_device.cuh"
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ CeedScalar shared_data[];
template<typename T, int NCOMP, int P, int Q, int MAXPQ>
static __global__ void
magma_gradn_2d_kernel(
const T *dinterp1d, const T *dgrad1d, magma_trans_t transT,
const T *dU, const int estrdU, const int cstrdU, const int dstrdU,
T *dV, const int estrdV, const int cstrdV, const int dstrdV, const int nelem)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int elem_id = (blockIdx.x * blockDim.y) + ty;
if (elem_id >= nelem) return;
T rU[1][NCOMP][P] = { make_zero<T>() }; // here DIMU = 1, but might be different for a fused operator
T rV[1][NCOMP][Q] = { make_zero<T>() }; // here DIMV = 1, but might be different for a fused operator
T rTmp = make_zero<T>();
// shift global memory pointers by elem stride
dU += elem_id * estrdU;
dV += elem_id * estrdV;
// assign shared memory pointers
T* sTinterp = (T*)(shared_data);
T* sTgrad = sTinterp + P*Q;
T* sTmp = sTgrad + P*Q;
sTmp += ty * (P * MAXPQ);
// read T
if (ty == 0) {
dread_T_gm2sm<P, Q>(tx, transT, dinterp1d, sTinterp);
dread_T_gm2sm<P, Q>(tx, transT, dgrad1d, sTgrad);
}
// No need to read V ( required only in transposed grad )
const T beta = make_zero<T>();
/* read U (idim = 0 for dU, iDIM = 0 for rU) --
there is a sync at the end of this function */
readU_2d<T, P, 1, NCOMP, P, 0>
(dU + (0*dstrdU), cstrdU, rU, sTmp, tx);
/* first call (iDIM = 0, iDIMU = 0, iDIMV = 0) --
output from rV[0][][] into dV (idim = 0) */
magma_grad_2d_device<T, 1, 1, NCOMP, P, Q, P, Q, 0, 0, 0>
(sTinterp, sTgrad, rU, rV, beta, tx, rTmp, sTmp);
/* there is a sync at the end of magma_grad_2d_device */
writeV_2d<T, Q, 1, NCOMP, Q, 0>
(dV+(0*dstrdV), cstrdV, rV, tx);
/* second call (iDIM = 1, iDIMU = 0, iDIMV = 0) --
output from rV[0][][] into dV (idim = 1) */
magma_grad_2d_device<T, 1, 1, NCOMP, P, Q, P, Q, 1, 0, 0>
(sTinterp, sTgrad, rU, rV, beta, tx, rTmp, sTmp);
/* there is a sync at the end of magma_grad_2d_device */
writeV_2d<T, Q, 1, NCOMP, Q, 0>
(dV+(1*dstrdV), cstrdV, rV, tx);
}
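// Editor's note (illustrative summary, not part of the upstream file): the kernel
// above evaluates the 2D gradient as two 1D tensor contractions over the same
// register-cached input rU (one P x P slab per component). The first
// magma_grad_2d_device call (iDIM = 0) appears to differentiate along dimension 0
// and interpolate along dimension 1, writing a Q x Q result into dV slice 0; the
// second call (iDIM = 1) swaps the roles and writes slice 1. Reading U once and
// reusing sTmp for both passes is why the intermediate synchronizations noted in
// the comments are required.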
//////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int NCOMP, int P, int Q>
static magma_int_t
magma_gradn_2d_kernel_driver(
const T *dinterp1d, const T *dgrad1d, magma_trans_t transT,
const T *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
T *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_device_t device;
magma_getdevice( &device );
magma_int_t shmem_max, nthreads_max;
const int MAXPQ = maxpq(P,Q);
magma_int_t nthreads = MAXPQ;
magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
magma_int_t shmem = 0;
shmem += sizeof(T) * 2*P*Q; // for sTinterp and sTgrad
shmem += sizeof(T) * ntcol * (P*MAXPQ); // for reforming rU we need PxP, and for the intermediate output we need PxQ
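// Editor's note (worked sizing example, assuming double-precision CeedScalar):
// for P = 3, Q = 4, maxthreads = 256 we get nthreads = MAXPQ = 4 and ntcol = 64, so
// shmem = 8*2*3*4 + 8*64*(3*4) = 192 + 6144 = 6336 bytes, well under the default
// 48 KB per-block limit; larger P/Q or ntcol is what the opt-in attribute below is for.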
cudaDeviceGetAttribute (&nthreads_max, cudaDevAttrMaxThreadsPerBlock, device);
#if CUDA_VERSION >= 9000
cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
if (shmem <= shmem_max) {
cudaFuncSetAttribute(magma_gradn_2d_kernel<T,NCOMP,P,Q,MAXPQ>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem);
}
#else
cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device);
#endif // CUDA_VERSION >= 9000
if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
return 1; // launch failed
}
else {
magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
dim3 threads(nthreads, ntcol, 1);
dim3 grid(nblocks, 1, 1);
// IMPORTANT: we instantiate with DIM=1 instead of DIM=2 because the kernel handles one dimension at a time
// We should instantiate with DIM >= 1 when we fuse the whole operator, because of the q-function
magma_gradn_2d_kernel<T,NCOMP,P,Q,MAXPQ><<<grid, threads, shmem, magma_queue_get_cuda_stream(queue)>>>
(dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem);
return (cudaPeekAtLastError() == cudaSuccess) ? 0 : 1;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P, int Q>
static magma_int_t
magma_gradn_2d_ncomp(
magma_int_t ncomp,
const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (ncomp) {
case 1:
launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,1,P,Q>
(dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,2,P,Q>
(dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_gradn_2d_kernel_driver<CeedScalar,3,P,Q>
(dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P>
static magma_int_t
magma_gradn_2d_ncomp_q(
magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (Q) {
case 1:
launch_failed = magma_gradn_2d_ncomp<P, 1>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_gradn_2d_ncomp<P, 2>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_gradn_2d_ncomp<P, 3>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_gradn_2d_ncomp<P, 4>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_gradn_2d_ncomp<P, 5>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_gradn_2d_ncomp<P, 6>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_gradn_2d_ncomp<P, 7>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_gradn_2d_ncomp<P, 8>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_gradn_2d_ncomp<P, 9>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_gradn_2d_ncomp<P,10>
(ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
static magma_int_t
magma_gradn_2d_ncomp_q_p(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, magma_trans_t transT,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (P) {
case 1:
launch_failed = magma_gradn_2d_ncomp_q< 1>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_gradn_2d_ncomp_q< 2>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_gradn_2d_ncomp_q< 3>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_gradn_2d_ncomp_q< 4>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_gradn_2d_ncomp_q< 5>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_gradn_2d_ncomp_q< 6>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_gradn_2d_ncomp_q< 7>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_gradn_2d_ncomp_q< 8>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_gradn_2d_ncomp_q< 9>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_gradn_2d_ncomp_q<10>
(Q, ncomp, dinterp1d, dgrad1d, transT, dU, estrdU, cstrdU, dstrdU, dV, estrdV, cstrdV, dstrdV, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_gradn_2d(
magma_int_t P, magma_int_t Q, magma_int_t ncomp,
const CeedScalar *dinterp1d, const CeedScalar *dgrad1d, CeedTransposeMode tmode,
const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, magma_int_t dstrdU,
CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t dstrdV,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans;
launch_failed = magma_gradn_2d_ncomp_q_p(
P, Q, ncomp,
dinterp1d, dgrad1d, transT,
dU, estrdU, cstrdU, dstrdU,
dV, estrdV, cstrdV, dstrdV,
nelem, maxthreads, queue);
return launch_failed;
}
|
90bb3b0ebc8a295dc66108f99229ed36b1af983a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
bools[index] = (idata[index] == 0) ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
if (bools[index] == 1)
{
odata[indices[index]] = idata[index];
}
}
}
}
|
90bb3b0ebc8a295dc66108f99229ed36b1af983a.cu
|
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
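// Editor's note: the file/line arguments are normally supplied by a thin wrapper
// macro at the call site. Assuming the project's header does not already provide
// one, a minimal (hypothetical) sketch would be
//   #define checkCUDAErrorWithLine(msg) checkCUDAErrorFn(msg, __FILE__, __LINE__)
// so call sites can simply write checkCUDAErrorWithLine("kernMapToBoolean failed!");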
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
bools[index] = (idata[index] == 0) ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
if (bools[index] == 1)
{
odata[indices[index]] = idata[index];
}
}
}
}
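// Editor's note (worked example, not part of the upstream file): the two kernels
// above are the map and scatter halves of stream compaction; an exclusive scan of
// bools (implemented elsewhere) supplies the indices array in between.
// For idata = [3, 0, 7, 0, 1], n = 5:
//   kernMapToBoolean -> bools   = [1, 0, 1, 0, 1]
//   exclusive scan   -> indices = [0, 1, 1, 2, 2]
//   kernScatter      -> odata   = [3, 7, 1], count = indices[n-1] + bools[n-1] = 3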
|
4e7eb1076c672036074f5a273d35d8497cd4ff80.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bellmanFord.hpp"
__global__
void relax(int u, int count){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int v = gpuParams.edge[gpuParams.index[u] + tid];
if(tid > count || v == u){
return;
}
printf("Relax spawned by thread: %d\n", u);
printf("vertex being processed: %d and distance: %d\n", v, gpuParams.distance[v]);
if(gpuParams.distance[v] > (gpuParams.distance[u] + gpuParams.weight[gpuParams.index[u] + tid])){
gpuParams.distance[v] = gpuParams.distance[u] + gpuParams.weight[gpuParams.index[u] + tid];
if(gpuParams.index[v + 1] - gpuParams.index[v])
gpuParams.f2[v] = true;
gpuParams.pi[v] = u;
}
printf("vertex after processed: %d and distance: %d\n", v, gpuParams.distance[v]);
}
|
4e7eb1076c672036074f5a273d35d8497cd4ff80.cu
|
#include "bellmanFord.hpp"
__global__
void relax(int u, int count){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int v = gpuParams.edge[gpuParams.index[u] + tid];
if(tid > count || v == u){
return;
}
printf("Relax spawned by thread: %d\n", u);
printf("vertex being processed: %d and distance: %d\n", v, gpuParams.distance[v]);
if(gpuParams.distance[v] > (gpuParams.distance[u] + gpuParams.weight[gpuParams.index[u] + tid])){
gpuParams.distance[v] = gpuParams.distance[u] + gpuParams.weight[gpuParams.index[u] + tid];
if(gpuParams.index[v + 1] - gpuParams.index[v])
gpuParams.f2[v] = true;
gpuParams.pi[v] = u;
}
printf("vertex after processed: %d and distance: %d\n", v, gpuParams.distance[v]);
}
|
7406d7836916f28f7d68fa5a3e1b5257e2d860b0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* DP_parallel_GPU.cu
* by Brandon Chow and Tahmid Rahman
* implemented for CS87 final project
* uses GPU to solve LCS using parallel Waveform
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "myopengllib.h"
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
#include <math.h>
#include <sys/types.h>
#include <signal.h>
static int N = 512;
//struct for storing useful information to pass into device
typedef struct my_cuda_data {
int* read_grid;
hiprandState_t *dev_random;
int N;
int num_iters;
int cur_iters;
hipEvent_t start;
hipEvent_t stop;
int* string_a;
int* string_b;
int currMax;
} my_cuda_data;
my_cuda_data cudaData;
static void clean_up(void);
static void compute_LCS(uchar4 *devPtr, void *my_data);
__global__ void LCS_kernel(my_cuda_data data);
void usage();
//main function
int main(int argc, char *argv[]) {
int i, j;
//process_args(argc, argv);
//int world[N][N];
//parse input
if (argc != 1){
if(argc != 2) {
printf("usage: ./dp_rand (size) \n");
exit(0);
} else {
N = atoi(argv[1]);
}
}
N = N+1;
int* world;
int* string_a = new int[N-1];
int* string_b = new int[N-1];
for (int i = 0; i < N-1; i++) {
string_a[i] = rand() % 26;
}
for (int i = 0; i < N-1; i++) {
string_b[i] = rand()%26;
}
//set up world
world = new int[N*N]();
hipEventCreate(&(cudaData.start));
hipEventCreate(&(cudaData.stop));
GPUDisplayData my_display(N, N, &cudaData, "Simple openGL-Cuda");
//preset table to start off with negative numbers associated with
//when the cell should "wake up"
for(i=0; i < N; i++){
for(j=0; j < N; j++){
world[i*N + j] = -1*i + -1*j;
}
}
cudaData.num_iters = 2*N;
cudaData.cur_iters = 0;
cudaData.N = N;
cudaData.currMax = 0;
//allocate memory for grid
HANDLE_ERROR(hipMalloc((void**)&cudaData.read_grid,
sizeof(int)*N*N), "malloc read_grid") ;
// copy the initial data to the GPU
HANDLE_ERROR (hipMemcpy(cudaData.read_grid, world,
sizeof(int)*N*N, hipMemcpyHostToDevice), "copy read_grid to GPU") ;
//allocate memory for string_a
HANDLE_ERROR(hipMalloc((void**)&cudaData.string_a,
sizeof(int)*(N-1)), "malloc read_grid") ;
// copy the initial data to the GPU
HANDLE_ERROR (hipMemcpy(cudaData.string_a, string_a,
sizeof(int)*(N-1), hipMemcpyHostToDevice), "copy string_a to GPU") ;
//allocate memory for string_b
HANDLE_ERROR(hipMalloc((void**)&cudaData.string_b,
sizeof(int)*(N-1)), "malloc read_grid")
// copy the initial data to the GPU
HANDLE_ERROR (hipMemcpy(cudaData.string_b, string_b,
sizeof(int)*(N-1), hipMemcpyHostToDevice), "copy string_b to GPU") ;
// register a clean-up function on exit that will call hipFree
// on any hipMalloc'ed space
my_display.RegisterExitFunction(clean_up);
// have the library run our Cuda animation
my_display.AnimateComputation(compute_LCS);
return 0;
}//end main
/* clean_up
*
* passed to RegisterExitFunction
* it is called when the program exits and should clean up
* all hipMalloc'ed state.
*/
static void clean_up(void) {
hipFree(cudaData.read_grid);
hipFree(cudaData.dev_random);
}
/* computeLCS
* @ inputs: devPtr
* @ inputs: my_data - a struct consisting of important data for LCS
*
* This function computes the LCS of two strings.
*/
static void compute_LCS(uchar4 *devPtr, void *my_data) {
//printf("INSIDE ANIMATE FIRE\n");
char place_holder;
//divvy up blocks and threads
my_cuda_data * cudaData = (my_cuda_data *)my_data;
//count iterations
int c_iters = cudaData->cur_iters;
cudaData->cur_iters = c_iters + 1;
int thread_count;
//if we've done the max number of iterations, handle timing info and quit
if (c_iters == cudaData->num_iters){
float tim;
hipEventRecord(cudaData->stop, 0);
hipEventSynchronize(cudaData->stop);
hipEventElapsedTime(&tim, cudaData->start, cudaData->stop);
//clean_up();
printf ("\n\nThe animation took: %f s\n\n", tim/1000);
printf("\nPress any key to exit.\n");
scanf("%c", &place_holder);
kill(getpid(), SIGKILL);
}
thread_count = 16; // 16x16 = 256 threads per block; the original 512x512 exceeds CUDA's 1024-threads-per-block limit
//set up blocks (round up so the whole N x N table is covered)
dim3 blocks((N+thread_count-1)/thread_count,(N+thread_count-1)/thread_count,1);
dim3 threads_block(thread_count,thread_count,1);
int N = cudaData->N;
float tim;
hipEventRecord(cudaData->start, 0);
//update cells
for (int i = 0; i < 2*N; i++){
hipLaunchKernelGGL(( LCS_kernel), dim3(blocks), dim3(threads_block), 0, 0, *cudaData);
}
hipEventRecord(cudaData->stop, 0);
hipEventSynchronize(cudaData->stop);
hipEventElapsedTime(&tim, cudaData->start, cudaData->stop);
//clean_up();
printf ("\n\nThe animation took: %f s\n\n", tim/1000);
printf("\nPress any key to exit.\n");
scanf("%c", &place_holder);
kill(getpid(), SIGKILL);
}
/* LCS_kernel
* @ inputs: data - a struct containing useful data for solving LCS
*
* this function either:
* increments a cell's age by 1
* computes the LCS value of a cell based on the LCS algorithm if age = 0
*/
__global__ void LCS_kernel(my_cuda_data data){
int N = data.N;
int* read_data = data.read_grid;
int* string_a = data.string_a;
int* string_b = data.string_b;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= N || y >= N) return; // guard: the launch grid is rounded up to a multiple of the block size
int offset = x + y*N;
int state = read_data[offset];
if (state < 0){
read_data[offset] = state+1;
}
else if (state == 0){
if (x == 0){
read_data[offset] = 0;
}
else if (y == 0){
read_data[offset] = 0;
}
else if (string_a[x-1] == string_b[y-1]){ // row/column 0 is the empty prefix, so cell (x,y) compares characters x-1 and y-1
int old_offset = (x-1) + (y-1)*N;
read_data[offset] = read_data[old_offset] + 1;
}
else{
int old_offset1 = (x-1) + y*N;
int old_offset2 = x + (y-1)*N;
int max_sublen1 = read_data[old_offset1];
int max_sublen2 = read_data[old_offset2];
if (max_sublen1 > max_sublen2){
read_data[offset] = max_sublen1;
}
else{
read_data[offset] = max_sublen2;
}
}
}
}
|
7406d7836916f28f7d68fa5a3e1b5257e2d860b0.cu
|
/* DP_parallel_GPU.cu
* by Brandon Chow and Tahmid Rahman
* implemented for CS87 final project
* uses GPU to solve LCS using parallel Waveform
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "myopengllib.h"
#include <curand_kernel.h>
#include <curand.h>
#include <math.h>
#include <sys/types.h>
#include <signal.h>
static int N = 512;
//struct for storing useful information to pass into device
typedef struct my_cuda_data {
int* read_grid;
curandState *dev_random;
int N;
int num_iters;
int cur_iters;
cudaEvent_t start;
cudaEvent_t stop;
int* string_a;
int* string_b;
int currMax;
} my_cuda_data;
my_cuda_data cudaData;
static void clean_up(void);
static void compute_LCS(uchar4 *devPtr, void *my_data);
__global__ void LCS_kernel(my_cuda_data data);
void usage();
//main function
int main(int argc, char *argv[]) {
int i, j;
//process_args(argc, argv);
//int world[N][N];
//parse input
if (argc != 1){
if(argc != 2) {
printf("usage: ./dp_rand (size) \n");
exit(0);
} else {
N = atoi(argv[1]);
}
}
N = N+1;
int* world;
int* string_a = new int[N-1];
int* string_b = new int[N-1];
for (int i = 0; i < N-1; i++) {
string_a[i] = rand() % 26;
}
for (int i = 0; i < N-1; i++) {
string_b[i] = rand()%26;
}
//set up world
world = new int[N*N]();
cudaEventCreate(&(cudaData.start));
cudaEventCreate(&(cudaData.stop));
GPUDisplayData my_display(N, N, &cudaData, "Simple openGL-Cuda");
//preset table to start off with negative numbers associated with
//when the cell should "wake up"
for(i=0; i < N; i++){
for(j=0; j < N; j++){
world[i*N + j] = -1*i + -1*j;
}
}
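// Editor's note (worked example): for N = 3 the counters initialized above are
//    0 -1 -2
//   -1 -2 -3
//   -2 -3 -4
// so cell (i,j) reaches 0 after i+j kernel sweeps and all cells on one
// anti-diagonal wake up together -- exactly the wavefront order the LCS
// recurrence needs; 2*N sweeps suffice to reach the bottom-right cell.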
cudaData.num_iters = 2*N;
cudaData.cur_iters = 0;
cudaData.N = N;
cudaData.currMax = 0;
//allocate memory for grid
HANDLE_ERROR(cudaMalloc((void**)&cudaData.read_grid,
sizeof(int)*N*N), "malloc read_grid") ;
// copy the initial data to the GPU
HANDLE_ERROR (cudaMemcpy(cudaData.read_grid, world,
sizeof(int)*N*N, cudaMemcpyHostToDevice), "copy read_grid to GPU") ;
//allocate memory for string_a
HANDLE_ERROR(cudaMalloc((void**)&cudaData.string_a,
sizeof(int)*(N-1)), "malloc read_grid") ;
// copy the initial data to the GPU
HANDLE_ERROR (cudaMemcpy(cudaData.string_a, string_a,
sizeof(int)*(N-1), cudaMemcpyHostToDevice), "copy string_a to GPU") ;
//allocate memory for string_b
HANDLE_ERROR(cudaMalloc((void**)&cudaData.string_b,
sizeof(int)*(N-1)), "malloc read_grid")
// copy the initial data to the GPU
HANDLE_ERROR (cudaMemcpy(cudaData.string_b, string_b,
sizeof(int)*(N-1), cudaMemcpyHostToDevice), "copy string_b to GPU") ;
// register a clean-up function on exit that will call cudaFree
// on any cudaMalloc'ed space
my_display.RegisterExitFunction(clean_up);
// have the library run our Cuda animation
my_display.AnimateComputation(compute_LCS);
return 0;
}//end main
/* clean_up
*
* passed to RegisterExitFunction
* it is called when the program exits and should clean up
* all cudaMalloc'ed state.
*/
static void clean_up(void) {
cudaFree(cudaData.read_grid);
cudaFree(cudaData.dev_random);
}
/* computeLCS
* @ inputs: devPtr
* @ inputs: my_data - a struct consisting of important data for LCS
*
* This function computes the LCS of two strings.
*/
static void compute_LCS(uchar4 *devPtr, void *my_data) {
//printf("INSIDE ANIMATE FIRE\n");
char place_holder;
//divvy up blocks and threads
my_cuda_data * cudaData = (my_cuda_data *)my_data;
//count iterations
int c_iters = cudaData->cur_iters;
cudaData->cur_iters = c_iters + 1;
int thread_count;
//if we've done the max number of iterations, handle timing info and quit
if (c_iters == cudaData->num_iters){
float tim;
cudaEventRecord(cudaData->stop, 0);
cudaEventSynchronize(cudaData->stop);
cudaEventElapsedTime(&tim, cudaData->start, cudaData->stop);
//clean_up();
printf ("\n\nThe animation took: %f s\n\n", tim/1000);
printf("\nPress any key to exit.\n");
scanf("%c", &place_holder);
kill(getpid(), SIGKILL);
}
thread_count = 16; // 16x16 = 256 threads per block; the original 512x512 exceeds CUDA's 1024-threads-per-block limit
//set up blocks (round up so the whole N x N table is covered)
dim3 blocks((N+thread_count-1)/thread_count,(N+thread_count-1)/thread_count,1);
dim3 threads_block(thread_count,thread_count,1);
int N = cudaData->N;
float tim;
cudaEventRecord(cudaData->start, 0);
//update cells
for (int i = 0; i < 2*N; i++){
LCS_kernel<<<blocks, threads_block>>>(*cudaData);
}
cudaEventRecord(cudaData->stop, 0);
cudaEventSynchronize(cudaData->stop);
cudaEventElapsedTime(&tim, cudaData->start, cudaData->stop);
//clean_up();
printf ("\n\nThe animation took: %f s\n\n", tim/1000);
printf("\nPress any key to exit.\n");
scanf("%c", &place_holder);
kill(getpid(), SIGKILL);
}
/* LCS_kernel
* @ inputs: data - a struct containing useful data for solving LCS
*
* this function either:
* increments a cell's age by 1
* computes the LCS value of a cell based on the LCS algorithm if age = 0
*/
__global__ void LCS_kernel(my_cuda_data data){
int N = data.N;
int* read_data = data.read_grid;
int* string_a = data.string_a;
int* string_b = data.string_b;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= N || y >= N) return; // guard: the launch grid is rounded up to a multiple of the block size
int offset = x + y*N;
int state = read_data[offset];
if (state < 0){
read_data[offset] = state+1;
}
else if (state == 0){
if (x == 0){
read_data[offset] = 0;
}
else if (y == 0){
read_data[offset] = 0;
}
else if (string_a[x-1] == string_b[y-1]){ // row/column 0 is the empty prefix, so cell (x,y) compares characters x-1 and y-1
int old_offset = (x-1) + (y-1)*N;
read_data[offset] = read_data[old_offset] + 1;
}
else{
int old_offset1 = (x-1) + y*N;
int old_offset2 = x + (y-1)*N;
int max_sublen1 = read_data[old_offset1];
int max_sublen2 = read_data[old_offset2];
if (max_sublen1 > max_sublen2){
read_data[offset] = max_sublen1;
}
else{
read_data[offset] = max_sublen2;
}
}
}
}
|
ee5b54b9c4e5a4a5e77d4df909f7ccaefb6082de.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <copying/legacy/scatter.hpp>
#include <utilities/legacy/column_utils.hpp>
#include <cudf/legacy/copying.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <nvstrings/NVCategory.h>
#include <algorithm>
namespace cudf {
namespace detail {
gdf_column stack(const cudf::table &values, hipStream_t stream = 0)
{
gdf_dtype dtype = values.get_column(0)->dtype;
gdf_size_type num_cols = values.num_columns();
gdf_size_type num_rows = values.num_rows();
for (auto &&col : values) {
CUDF_EXPECTS(col->dtype == dtype, "All columns must have the same type");
}
bool input_is_nullable = std::any_of(values.begin(), values.end(),
[](gdf_column const* col){ return is_nullable(*col); });
if (values.num_rows() == 0) {
return cudf::allocate_column(dtype, 0, input_is_nullable);
}
// Allocate output
gdf_column output = allocate_like(*values.get_column(0),
num_cols * num_rows);
// This needs to be done because the output is unnamed in pandas
free(output.col_name);
output.col_name = nullptr;
// PLAN:
// Sync column categories if they were GDF_STRING_CATEGORY and convert temporary
// columns to GDF_INT32 (using gdf_dtype_of). Then normal path till after scatter.
// Finally, do a NVCategory->gather on the result column.
std::vector<gdf_column *> temp_values;
if (dtype == GDF_STRING_CATEGORY) {
std::transform(values.begin(), values.end(), std::back_inserter(temp_values),
[] (const gdf_column *c) { return new gdf_column(allocate_like(*c)); } );
sync_column_categories(values.begin(), temp_values.data(), values.num_columns());
std::for_each(temp_values.begin(), temp_values.end(),
[] (gdf_column* c) { c->dtype = gdf_dtype_of<gdf_nvstring_category>(); });
output.dtype = gdf_dtype_of<gdf_nvstring_category>();
} else {
std::transform(values.begin(), values.end(), std::back_inserter(temp_values),
[] (const gdf_column *c) { return const_cast<gdf_column *>(c); } );
}
// Allocate scatter map
rmm::device_vector<gdf_size_type> scatter_map(values.num_rows());
auto counting_it = thrust::make_counting_iterator(0);
auto strided_it = thrust::make_transform_iterator(counting_it,
[num_cols] __device__ (auto i){ return num_cols * i; });
thrust::copy(strided_it, strided_it + num_rows, scatter_map.begin());
cudf::table output_table{&output};
for (auto &&col : temp_values) {
cudf::table single_col_table = { const_cast<gdf_column*>(col) };
detail::scatter(&single_col_table, scatter_map.data().get(), &output_table);
thrust::transform(scatter_map.begin(), scatter_map.end(), scatter_map.begin(),
[] __device__ (auto i) { return ++i; });
}
if (dtype == GDF_STRING_CATEGORY)
{
output.dtype = GDF_STRING_CATEGORY;
nvcategory_gather(&output, static_cast<NVCategory*>(temp_values[0]->dtype_info.category));
std::for_each(temp_values.begin(), temp_values.end(),
[] (gdf_column* c) { gdf_column_free(c); });
}
return output;
}
} // namespace detail
gdf_column stack(const cudf::table &values) {
return detail::stack(values);
}
} // namespace cudf
|
ee5b54b9c4e5a4a5e77d4df909f7ccaefb6082de.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <copying/legacy/scatter.hpp>
#include <utilities/legacy/column_utils.hpp>
#include <cudf/legacy/copying.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <nvstrings/NVCategory.h>
#include <algorithm>
namespace cudf {
namespace detail {
gdf_column stack(const cudf::table &values, cudaStream_t stream = 0)
{
gdf_dtype dtype = values.get_column(0)->dtype;
gdf_size_type num_cols = values.num_columns();
gdf_size_type num_rows = values.num_rows();
for (auto &&col : values) {
CUDF_EXPECTS(col->dtype == dtype, "All columns must have the same type");
}
bool input_is_nullable = std::any_of(values.begin(), values.end(),
[](gdf_column const* col){ return is_nullable(*col); });
if (values.num_rows() == 0) {
return cudf::allocate_column(dtype, 0, input_is_nullable);
}
// Allocate output
gdf_column output = allocate_like(*values.get_column(0),
num_cols * num_rows);
// This needs to be done because the output is unnamed in pandas
free(output.col_name);
output.col_name = nullptr;
// PLAN:
// Sync column categories if they were GDF_STRING_CATEGORY and convert temporary
// columns to GDF_INT32 (using gdf_dtype_of). Then normal path till after scatter.
// Finally, do a NVCategory->gather on the result column.
std::vector<gdf_column *> temp_values;
if (dtype == GDF_STRING_CATEGORY) {
std::transform(values.begin(), values.end(), std::back_inserter(temp_values),
[] (const gdf_column *c) { return new gdf_column(allocate_like(*c)); } );
sync_column_categories(values.begin(), temp_values.data(), values.num_columns());
std::for_each(temp_values.begin(), temp_values.end(),
[] (gdf_column* c) { c->dtype = gdf_dtype_of<gdf_nvstring_category>(); });
output.dtype = gdf_dtype_of<gdf_nvstring_category>();
} else {
std::transform(values.begin(), values.end(), std::back_inserter(temp_values),
[] (const gdf_column *c) { return const_cast<gdf_column *>(c); } );
}
// Allocate scatter map
rmm::device_vector<gdf_size_type> scatter_map(values.num_rows());
auto counting_it = thrust::make_counting_iterator(0);
auto strided_it = thrust::make_transform_iterator(counting_it,
[num_cols] __device__ (auto i){ return num_cols * i; });
thrust::copy(strided_it, strided_it + num_rows, scatter_map.begin());
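// Editor's note (worked example): with num_cols = 3 and num_rows = 4 the map starts
// as [0, 3, 6, 9], so the first input column scatters into output rows {0, 3, 6, 9};
// the transform after each scatter below shifts the map to [1, 4, 7, 10] for the
// next column, and so on, interleaving the columns row by row into the stacked output.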
cudf::table output_table{&output};
for (auto &&col : temp_values) {
cudf::table single_col_table = { const_cast<gdf_column*>(col) };
detail::scatter(&single_col_table, scatter_map.data().get(), &output_table);
thrust::transform(scatter_map.begin(), scatter_map.end(), scatter_map.begin(),
[] __device__ (auto i) { return ++i; });
}
if (dtype == GDF_STRING_CATEGORY)
{
output.dtype = GDF_STRING_CATEGORY;
nvcategory_gather(&output, static_cast<NVCategory*>(temp_values[0]->dtype_info.category));
std::for_each(temp_values.begin(), temp_values.end(),
[] (gdf_column* c) { gdf_column_free(c); });
}
return output;
}
} // namespace detail
gdf_column stack(const cudf::table &values) {
return detail::stack(values);
}
} // namespace cudf
|
8e16ca9728acb767ec8ccc6a0ecaa6679b65d3a4.hip
|
// !!! This is a file automatically generated by hipify!!!
/***********************************************
streamcluster.cpp
: original source code of streamcluster with minor
modification regarding function calls
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Sang-Ha (a.k.a Shawn) Lee - [email protected]
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
#include "streamcluster_header.cu"
using namespace std;
#define MAXNAMESIZE 1024 // max filename length
#define SEED 1
#define SP 1 // number of repetitions of speedy must be >=1
#define ITER 3 // iterate ITER* k log k times; ITER >= 1
//#define PRINTINFO // Enables printing output
#define PROFILE // Enables timing info
//#define ENABLE_THREADS // Enables parallel execution
//#define INSERT_WASTE // Enables waste computation in dist function
#define CACHE_LINE 512 // cache line in byte
// GLOBAL
static bool *switch_membership; //whether to switch membership in pgain
static bool *is_center; //whether a point is a center
static int *center_table; //index table of centers
static int nproc; //# of threads
bool isCoordChanged;
// GPU Timing Info
double serial_t;
double cpu_to_gpu_t;
double gpu_to_cpu_t;
double alloc_t;
double kernel_t;
double free_t;
// instrumentation code
#ifdef PROFILE
double time_local_search;
double time_speedy;
double time_select_feasible;
double time_gain;
double time_shuffle;
double time_gain_dist;
double time_gain_init;
#endif
void inttofile(int data, char *filename){
FILE *fp = fopen(filename, "w");
fprintf(fp, "%d ", data);
fclose(fp);
}
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
int isIdentical(float *i, float *j, int D){
// tells whether two points of D dimensions are identical
int a = 0;
int equal = 1;
while (equal && a < D) {
if (i[a] != j[a]) equal = 0;
else a++;
}
if (equal) return 1;
else return 0;
}
/* comparator for floating point numbers */
static int floatcomp(const void *i, const void *j)
{
float a, b;
a = *(float *)(i);
b = *(float *)(j);
if (a > b) return (1);
if (a < b) return (-1);
return(0);
}
/* shuffle points into random order */
void shuffle(Points *points)
{
#ifdef PROFILE
double t1 = gettime();
#endif
long i, j;
Point temp;
for (i=0;i<points->num-1;i++) {
j=(lrand48()%(points->num - i)) + i;
temp = points->p[i];
points->p[i] = points->p[j];
points->p[j] = temp;
}
#ifdef PROFILE
double t2 = gettime();
time_shuffle += t2-t1;
#endif
}
/* shuffle an array of integers */
void intshuffle(int *intarray, int length)
{
#ifdef PROFILE
double t1 = gettime();
#endif
long i, j;
int temp;
for (i=0;i<length;i++) {
j=(lrand48()%(length - i))+i;
temp = intarray[i];
intarray[i]=intarray[j];
intarray[j]=temp;
}
#ifdef PROFILE
double t2 = gettime();
time_shuffle += t2-t1;
#endif
}
#ifdef INSERT_WASTE
float waste(float s )
{
for( int i =0 ; i< 4; i++ ) {
s += pow(s,0.78);
}
return s;
}
#endif
/* compute Euclidean distance squared between two points */
float dist(Point p1, Point p2, int dim)
{
int i;
float result=0.0;
for (i=0;i<dim;i++)
result += (p1.coord[i] - p2.coord[i])*(p1.coord[i] - p2.coord[i]);
#ifdef INSERT_WASTE
float s = waste(result);
result += s;
result -= s;
#endif
return(result);
}
/* run speedy on the points, return total cost of solution */
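/* Editor's note (descriptive summary): speedy makes one pass over the points,
opening a facility at point i with probability cost(i)/z, where cost(i) is i's
current assignment cost and z the facility price. Thread 0 makes the open
decision and signals it through the open flag / condition variable; the other
threads only reassign their block of points to the newly opened center. The
value returned is z*(#centers) plus the summed assignment costs. */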
float pspeedy(Points *points, float z, long *kcenter, int pid, pthread_barrier_t* barrier)
{
#ifdef PROFILE
double t1 = gettime();
#endif
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
//my block
long bsize = points->num/nproc;
long k1 = bsize * pid;
long k2 = k1 + bsize;
if( pid == nproc-1 ) k2 = points->num;
static float totalcost;
static bool open = false;
static float* costs; //cost for each thread.
static int i;
#ifdef ENABLE_THREADS
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
#endif
#ifdef PRINTINFO
if( pid == 0 ){
fprintf(stderr, "Speedy: facility cost %lf\n", z);
}
#endif
/* create center at first point, send it to itself */
for( int k = k1; k < k2; k++ ) {
float distance = dist(points->p[k],points->p[0],points->dim);
points->p[k].cost = distance * points->p[k].weight;
points->p[k].assign=0;
}
if( pid==0 ) {
*kcenter = 1;
costs = (float*)malloc(sizeof(float)*nproc);
}
if( pid != 0 ) { // we are not the master threads. we wait until a center is opened.
while(1) {
#ifdef ENABLE_THREADS
pthread_mutex_lock(&mutex);
while(!open) pthread_cond_wait(&cond,&mutex);
pthread_mutex_unlock(&mutex);
#endif
if( i >= points->num ) break;
for( int k = k1; k < k2; k++ )
{
float distance = dist(points->p[i],points->p[k],points->dim);
if( distance*points->p[k].weight < points->p[k].cost )
{
points->p[k].cost = distance * points->p[k].weight;
points->p[k].assign=i;
}
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
pthread_barrier_wait(barrier);
#endif
}
}
else { // I am the master thread. I decide whether to open a center and notify others if so.
for(i = 1; i < points->num; i++ ) {
bool to_open = ((float)lrand48()/(float)INT_MAX)<(points->p[i].cost/z);
if( to_open ) {
(*kcenter)++;
#ifdef ENABLE_THREADS
pthread_mutex_lock(&mutex);
#endif
open = true;
#ifdef ENABLE_THREADS
pthread_mutex_unlock(&mutex);
pthread_cond_broadcast(&cond);
#endif
for( int k = k1; k < k2; k++ ) {
float distance = dist(points->p[i],points->p[k],points->dim);
if( distance*points->p[k].weight < points->p[k].cost ) {
points->p[k].cost = distance * points->p[k].weight;
points->p[k].assign=i;
}
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
open = false;
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
}
}
#ifdef ENABLE_THREADS
pthread_mutex_lock(&mutex);
#endif
open = true;
#ifdef ENABLE_THREADS
pthread_mutex_unlock(&mutex);
pthread_cond_broadcast(&cond);
#endif
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
open = false;
float mytotal = 0;
for( int k = k1; k < k2; k++ ) {
mytotal += points->p[k].cost;
}
costs[pid] = mytotal;
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
// aggregate costs from each thread
if( pid == 0 )
{
totalcost=z*(*kcenter);
for( int i = 0; i < nproc; i++ )
{
totalcost += costs[i];
}
free(costs);
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
#ifdef PRINTINFO
if( pid == 0 )
{
fprintf(stderr, "Speedy opened %d facilities for total cost %lf\n",
*kcenter, totalcost);
fprintf(stderr, "Distance Cost %lf\n", totalcost - z*(*kcenter));
}
#endif
#ifdef PROFILE
double t2 = gettime();
if( pid== 0 ) {
time_speedy += t2 -t1;
}
#endif
return(totalcost);
}
/* facility location on the points using local search */
/* z is the facility cost, returns the total cost and # of centers */
/* assumes we are seeded with a reasonable solution */
/* cost should represent this solution's cost */
/* halt if there is < e improvement after iter calls to gain */
/* feasible is an array of numfeasible points which may be centers */
float pFL(Points *points, int *feasible, int numfeasible,
float z, long *k, int kmax, float cost, long iter, float e,
int pid, pthread_barrier_t* barrier)
{
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
long i;
long x;
float change;
long numberOfPoints;
change = cost;
/* continue until we run iter iterations without improvement */
/* stop instead if improvement is less than e */
while (change/cost > 1.0*e) {
change = 0.0;
numberOfPoints = points->num;
/* randomize order in which centers are considered */
if( pid == 0 ) {
intshuffle(feasible, numfeasible);
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
for (i=0;i<iter;i++) {
x = i%numfeasible;
change += pgain(feasible[x], points, z, k, kmax, is_center, center_table, switch_membership, isCoordChanged,
&serial_t, &cpu_to_gpu_t, &gpu_to_cpu_t, &alloc_t, &kernel_t, &free_t);
}
cost -= change;
#ifdef PRINTINFO
if( pid == 0 ) {
fprintf(stderr, "%d centers, cost %lf, total distance %lf\n",
*k, cost, cost - z*(*k));
}
#endif
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
}
return(cost);
}
int selectfeasible_fast(Points *points, int **feasible, int kmin, int pid, pthread_barrier_t* barrier)
{
#ifdef PROFILE
double t1 = gettime();
#endif
int numfeasible = points->num;
if (numfeasible > (ITER*kmin*log((float)kmin)))
numfeasible = (int)(ITER*kmin*log((float)kmin));
*feasible = (int *)malloc(numfeasible*sizeof(int));
float* accumweight;
float totalweight;
/*
Calcuate my block.
For now this routine does not seem to be the bottleneck, so it is not parallelized.
When necessary, this can be parallelized by setting k1 and k2 to
proper values and calling this routine from all threads ( it is called only
by thread 0 for now ).
Note that when parallelized, the randomization might not be the same and it might
not be difficult to measure the parallel speed-up for the whole program.
*/
// long bsize = numfeasible;
long k1 = 0;
long k2 = numfeasible;
float w;
int l,r,k;
/* not many points, all will be feasible */
if (numfeasible == points->num) {
for (int i=k1;i<k2;i++)
(*feasible)[i] = i;
return numfeasible;
}
accumweight= (float*)malloc(sizeof(float)*points->num);
accumweight[0] = points->p[0].weight;
totalweight=0;
for( int i = 1; i < points->num; i++ ) {
accumweight[i] = accumweight[i-1] + points->p[i].weight;
}
totalweight=accumweight[points->num-1];
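/* Editor's note (worked example): with weights [2, 1, 3], accumweight = [2, 3, 6]
and totalweight = 6; drawing w uniformly from [0, 6) and binary-searching for the
first prefix sum exceeding w selects index 0, 1 or 2 with probability 2/6, 1/6 and
3/6 respectively, i.e. feasible centers are sampled in proportion to point weight. */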
for(int i=k1; i<k2; i++ ) {
w = (lrand48()/(float)INT_MAX)*totalweight;
//binary search
l=0;
r=points->num-1;
if( accumweight[0] > w ) {
(*feasible)[i]=0;
continue;
}
while( l+1 < r ) {
k = (l+r)/2;
if( accumweight[k] > w ) {
r = k;
}
else {
l=k;
}
}
(*feasible)[i]=r;
}
free(accumweight);
#ifdef PROFILE
double t2 = gettime();
time_select_feasible += t2-t1;
#endif
return numfeasible;
}
/* compute approximate kmedian on the points */
float pkmedian(Points *points, long kmin, long kmax, long* kfinal,
int pid, pthread_barrier_t* barrier )
{
int i;
float cost;
float lastcost;
float hiz, loz, z;
static long k;
static int *feasible;
static int numfeasible;
static float* hizs;
if( pid==0 ) hizs = (float*)calloc(nproc,sizeof(float));
hiz = loz = 0.0;
long numberOfPoints = points->num;
long ptDimension = points->dim;
//my block
long bsize = points->num/nproc;
long k1 = bsize * pid;
long k2 = k1 + bsize;
if( pid == nproc-1 ) k2 = points->num;
#ifdef PRINTINFO
if( pid == 0 )
{
printf("Starting Kmedian procedure\n");
printf("%i points in %i dimensions\n", numberOfPoints, ptDimension);
}
#endif
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
float myhiz = 0;
for (long kk=k1;kk < k2; kk++ ) {
myhiz += dist(points->p[kk], points->p[0],
ptDimension)*points->p[kk].weight;
}
hizs[pid] = myhiz;
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
for( int i = 0; i < nproc; i++ ) {
hiz += hizs[i];
}
loz=0.0; z = (hiz+loz)/2.0;
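/* Editor's note: hiz is the cost of assigning every point to p[0], which bounds any
useful facility price from above, so [loz, hiz] brackets the binary search on z done
below: too many centers opened means z was too cheap (raise loz), too few means it
was too expensive (lower hiz). */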
/* NEW: Check whether more centers than points! */
if (points->num <= kmax) {
/* just return all points as facilities */
for (long kk=k1;kk<k2;kk++) {
points->p[kk].assign = kk;
points->p[kk].cost = 0;
}
cost = 0;
if( pid== 0 ) {
free(hizs);
*kfinal = k;
}
return cost;
}
if( pid == 0 ) shuffle(points);
cost = pspeedy(points, z, &k, pid, barrier);
#ifdef PRINTINFO
if( pid == 0 )
printf("thread %d: Finished first call to speedy, cost=%lf, k=%i\n",pid,cost,k);
#endif
i=0;
/* give speedy SP chances to get at least kmin/2 facilities */
while ((k < kmin)&&(i<SP)) {
cost = pspeedy(points, z, &k, pid, barrier);
i++;
}
#ifdef PRINTINFO
if( pid==0)
printf("thread %d: second call to speedy, cost=%lf, k=%d\n",pid,cost,k);
#endif
/* if still not enough facilities, assume z is too high */
while (k < kmin) {
#ifdef PRINTINFO
if( pid == 0 ) {
printf("%lf %lf\n", loz, hiz);
printf("Speedy indicates we should try lower z\n");
}
#endif
if (i >= SP) {hiz=z; z=(hiz+loz)/2.0; i=0;}
if( pid == 0 ) shuffle(points);
cost = pspeedy(points, z, &k, pid, barrier);
i++;
}
/* now we begin the binary search for real */
/* must designate some points as feasible centers */
/* this creates more consistancy between FL runs */
/* helps to guarantee correct # of centers at the end */
if( pid == 0 )
{
numfeasible = selectfeasible_fast(points,&feasible,kmin,pid,barrier);
for( int i = 0; i< points->num; i++ ) {
is_center[points->p[i].assign]= true;
}
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
while(1) {
#ifdef PRINTINFO
if( pid==0 )
{
printf("loz = %lf, hiz = %lf\n", loz, hiz);
printf("Running Local Search...\n");
}
#endif
/* first get a rough estimate on the FL solution */
// pthread_barrier_wait(barrier);
lastcost = cost;
cost = pFL(points, feasible, numfeasible,
z, &k, kmax, cost, (long)(ITER*kmax*log((float)kmax)), 0.1, pid, barrier);
/* if number of centers seems good, try a more accurate FL */
if (((k <= (1.1)*kmax)&&(k >= (0.9)*kmin))||
((k <= kmax+2)&&(k >= kmin-2))) {
#ifdef PRINTINFO
if( pid== 0)
{
printf("Trying a more accurate local search...\n");
}
#endif
/* may need to run a little longer here before halting without
improvement */
cost = pFL(points, feasible, numfeasible,
z, &k, kmax, cost, (long)(ITER*kmax*log((float)kmax)), 0.001, pid, barrier);
}
if (k > kmax) {
/* facilities too cheap */
/* increase facility cost and up the cost accordingly */
loz = z; z = (hiz+loz)/2.0;
cost += (z-loz)*k;
}
if (k < kmin) {
/* facilities too expensive */
/* decrease facility cost and reduce the cost accordingly */
hiz = z; z = (hiz+loz)/2.0;
cost += (z-hiz)*k;
}
/* if k is good, return the result */
/* if we're stuck, just give up and return what we have */
if (((k <= kmax)&&(k >= kmin))||((loz >= (0.999)*hiz)) )
{
break;
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
}
//clean up...
if( pid==0 ) {
free(feasible);
free(hizs);
*kfinal = k;
}
return cost;
}
/* compute the means for the k clusters */
int contcenters(Points *points)
{
long i, ii;
float relweight;
for (i=0;i<points->num;i++) {
/* compute relative weight of this point to the cluster */
if (points->p[i].assign != i) {
relweight=points->p[points->p[i].assign].weight + points->p[i].weight;
relweight = points->p[i].weight/relweight;
for (ii=0;ii<points->dim;ii++) {
points->p[points->p[i].assign].coord[ii]*=1.0-relweight;
points->p[points->p[i].assign].coord[ii]+=
points->p[i].coord[ii]*relweight;
}
points->p[points->p[i].assign].weight += points->p[i].weight;
}
}
return 0;
}
/* copy centers from points to centers */
void copycenters(Points *points, Points* centers, long* centerIDs, long offset)
{
long i;
long k;
bool *is_a_median = (bool *) calloc(points->num, sizeof(bool));
/* mark the centers */
for ( i = 0; i < points->num; i++ ) {
is_a_median[points->p[i].assign] = 1;
}
k=centers->num;
/* count how many */
for ( i = 0; i < points->num; i++ ) {
if ( is_a_median[i] ) {
memcpy( centers->p[k].coord, points->p[i].coord, points->dim * sizeof(float));
centers->p[k].weight = points->p[i].weight;
centerIDs[k] = i + offset;
k++;
}
}
centers->num = k;
free(is_a_median);
}
void* localSearchSub(void* arg_) {
pkmedian_arg_t* arg= (pkmedian_arg_t*)arg_;
pkmedian(arg->points,arg->kmin,arg->kmax,arg->kfinal,arg->pid,arg->barrier);
return NULL;
}
void localSearch( Points* points, long kmin, long kmax, long* kfinal ) {
#ifdef PROFILE
double t1 = gettime();
#endif
pthread_barrier_t barrier;
#ifdef ENABLE_THREADS
pthread_barrier_init(&barrier,NULL,nproc);
#endif
pthread_t* threads = new pthread_t[nproc];
pkmedian_arg_t* arg = new pkmedian_arg_t[nproc];
for( int i = 0; i < nproc; i++ ) {
arg[i].points = points;
arg[i].kmin = kmin;
arg[i].kmax = kmax;
arg[i].pid = i;
arg[i].kfinal = kfinal;
arg[i].barrier = &barrier;
#ifdef ENABLE_THREADS
pthread_create(threads+i,NULL,localSearchSub,(void*)&arg[i]);
#else
localSearchSub(&arg[0]);
#endif
}
for ( int i = 0; i < nproc; i++) {
#ifdef ENABLE_THREADS
pthread_join(threads[i],NULL);
#endif
}
delete[] threads;
delete[] arg;
#ifdef ENABLE_THREADS
pthread_barrier_destroy(&barrier);
#endif
#ifdef PROFILE
double t2 = gettime();
time_local_search += t2-t1;
#endif
}
void outcenterIDs( Points* centers, long* centerIDs, char* outfile ) {
FILE* fp = fopen(outfile, "w");
if( fp==NULL ) {
fprintf(stderr, "error opening %s\n",outfile);
exit(1);
}
int* is_a_median = (int*)calloc( sizeof(int), centers->num );
for( int i =0 ; i< centers->num; i++ ) {
is_a_median[centers->p[i].assign] = 1;
}
for( int i = 0; i < centers->num; i++ ) {
if( is_a_median[i] ) {
fprintf(fp, "%u\n", centerIDs[i]);
fprintf(fp, "%lf\n", centers->p[i].weight);
for( int k = 0; k < centers->dim; k++ ) {
fprintf(fp, "%lf ", centers->p[i].coord[k]);
}
fprintf(fp,"\n\n");
}
}
fclose(fp);
}
void streamCluster( PStream* stream,
long kmin, long kmax, int dim,
long chunksize, long centersize, char* outfile )
{
float* block = (float*)malloc( chunksize*dim*sizeof(float) );
float* centerBlock = (float*)malloc(centersize*dim*sizeof(float) );
long* centerIDs = (long*)malloc(centersize*dim*sizeof(long));
if( block == NULL ) {
fprintf(stderr,"not enough memory for a chunk!\n");
exit(1);
}
Points points;
points.dim = dim;
points.num = chunksize;
hipHostMalloc((void **)&points.p,chunksize*sizeof(Point));
// points.p = (Point *)malloc(chunksize*sizeof(Point));
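// Editor's note: hipHostMalloc allocates page-locked (pinned) host memory here and,
// further down, for center_table; pinned buffers speed up the repeated host<->device
// copies inside pgain(), which is presumably why the plain malloc calls were left
// commented out.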
for( int i = 0; i < chunksize; i++ ) {
points.p[i].coord = &block[i*dim];
}
Points centers;
centers.dim = dim;
centers.p = (Point *)malloc(centersize*sizeof(Point));
centers.num = 0;
for( int i = 0; i< centersize; i++ ) {
centers.p[i].coord = ¢erBlock[i*dim];
centers.p[i].weight = 1.0;
}
long IDoffset = 0;
long kfinal;
while(1) {
size_t numRead = stream->read(block, dim, chunksize );
fprintf(stderr,"read %d points\n",numRead);
if( stream->ferror() || numRead < (unsigned int)chunksize && !stream->feof() ) {
fprintf(stderr, "error reading data!\n");
exit(1);
}
points.num = numRead;
for( int i = 0; i < points.num; i++ ) {
points.p[i].weight = 1.0;
}
switch_membership = (bool*)malloc(points.num*sizeof(bool));
is_center = (bool*)calloc(points.num,sizeof(bool));
// center_table = (int*)malloc(points.num*sizeof(int));
hipHostMalloc((void **)¢er_table,points.num*sizeof(int));
localSearch(&points,kmin, kmax,&kfinal);
fprintf(stderr,"finish local search\n");
contcenters(&points);
isCoordChanged = true;
if( kfinal + centers.num > centersize ) {
//here we don't handle the situation where # of centers gets too large.
fprintf(stderr,"oops! no more space for centers\n");
exit(1);
}
#ifdef PRINTINFO
printf("finish cont center\n");
#endif
copycenters(&points, ¢ers, centerIDs, IDoffset);
IDoffset += numRead;
#ifdef PRINTINFO
printf("finish copy centers\n");
#endif
free(is_center);
free(switch_membership);
// free(center_table);
hipFree(center_table);
if( stream->feof() ) {
break;
}
}
//finally cluster all temp centers
switch_membership = (bool*)malloc(centers.num*sizeof(bool));
is_center = (bool*)calloc(centers.num,sizeof(bool));
// center_table = (int*)malloc(centers.num*sizeof(int));
hipHostMalloc((void **)¢er_table,centers.num*sizeof(int));
localSearch( ¢ers, kmin, kmax ,&kfinal );
contcenters(¢ers);
outcenterIDs( ¢ers, centerIDs, outfile);
}
int main(int argc, char **argv)
{
char *outfilename = new char[MAXNAMESIZE];
char *infilename = new char[MAXNAMESIZE];
long kmin, kmax, n, chunksize, clustersize;
int dim;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif //PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_streamcluster);
#endif
if (argc<10) {
fprintf(stderr,"usage: %s k1 k2 d n chunksize clustersize infile outfile nproc\n",
argv[0]);
fprintf(stderr," k1: Min. number of centers allowed\n");
fprintf(stderr," k2: Max. number of centers allowed\n");
fprintf(stderr," d: Dimension of each data point\n");
fprintf(stderr," n: Number of data points\n");
fprintf(stderr," chunksize: Number of data points to handle per step\n");
fprintf(stderr," clustersize: Maximum number of intermediate centers\n");
fprintf(stderr," infile: Input file (if n<=0)\n");
fprintf(stderr," outfile: Output file\n");
fprintf(stderr," nproc: Number of threads to use\n");
fprintf(stderr,"\n");
fprintf(stderr, "if n > 0, points will be randomly generated instead of reading from infile.\n");
exit(1);
}
kmin = atoi(argv[1]);
kmax = atoi(argv[2]);
dim = atoi(argv[3]);
n = atoi(argv[4]);
chunksize = atoi(argv[5]);
clustersize = atoi(argv[6]);
strcpy(infilename, argv[7]);
strcpy(outfilename, argv[8]);
nproc = atoi(argv[9]);
srand48(SEED);
PStream* stream;
if( n > 0 ) {
stream = new SimStream(n);
}
else {
stream = new FileStream(infilename);
}
double t1 = gettime();
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
serial_t = 0.0;
cpu_to_gpu_t = 0.0;
gpu_to_cpu_t = 0.0;
alloc_t = 0.0;
free_t = 0.0;
kernel_t = 0.0;
isCoordChanged = false;
streamCluster(stream, kmin, kmax, dim, chunksize, clustersize, outfilename );
freeDevMem();
freeHostMem();
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
double t2 = gettime();
printf("time = %lfs\n",t2-t1);
delete stream;
#ifdef PROFILE
printf("time pgain = %lfs\n", time_gain);
printf("time pgain_dist = %lfs\n", time_gain_dist);
printf("time pgain_init = %lfs\n", time_gain_init);
printf("time pselect = %lfs\n", time_select_feasible);
printf("time pspeedy = %lfs\n", time_speedy);
printf("time pshuffle = %lfs\n", time_shuffle);
printf("time localSearch = %lfs\n", time_local_search);
printf("\n\n");
printf("====CUDA Timing info (pgain)====\n");
printf("time serial = %lfs\n", serial_t/1000);
printf("time CPU to GPU memory copy = %lfs\n", cpu_to_gpu_t/1000);
printf("time GPU to CPU memory copy back = %lfs\n", gpu_to_cpu_t/1000);
printf("time GPU malloc = %lfs\n", alloc_t/1000);
printf("time GPU free = %lfs\n", free_t/1000);
printf("time kernel = %lfs\n", kernel_t/1000);
#endif
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
8e16ca9728acb767ec8ccc6a0ecaa6679b65d3a4.cu
|
/***********************************************
streamcluster.cpp
: original source code of streamcluster with minor
modification regarding function calls
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Sang-Ha (a.k.a Shawn) Lee - [email protected]
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
#include "streamcluster_header.cu"
using namespace std;
#define MAXNAMESIZE 1024 // max filename length
#define SEED 1
#define SP 1 // number of repetitions of speedy must be >=1
#define ITER 3 // iterate ITER* k log k times; ITER >= 1
//#define PRINTINFO // Enables printing output
#define PROFILE // Enables timing info
//#define ENABLE_THREADS // Enables parallel execution
//#define INSERT_WASTE // Enables waste computation in dist function
#define CACHE_LINE 512 // cache line in byte
// GLOBAL
static bool *switch_membership; //whether to switch membership in pgain
static bool *is_center; //whether a point is a center
static int *center_table; //index table of centers
static int nproc; //# of threads
bool isCoordChanged;
// GPU Timing Info
double serial_t;
double cpu_to_gpu_t;
double gpu_to_cpu_t;
double alloc_t;
double kernel_t;
double free_t;
// instrumentation code
#ifdef PROFILE
double time_local_search;
double time_speedy;
double time_select_feasible;
double time_gain;
double time_shuffle;
double time_gain_dist;
double time_gain_init;
#endif
void inttofile(int data, char *filename){
FILE *fp = fopen(filename, "w");
fprintf(fp, "%d ", data);
fclose(fp);
}
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
int isIdentical(float *i, float *j, int D){
// tells whether two points of D dimensions are identical
int a = 0;
int equal = 1;
while (equal && a < D) {
if (i[a] != j[a]) equal = 0;
else a++;
}
if (equal) return 1;
else return 0;
}
/* comparator for floating point numbers */
static int floatcomp(const void *i, const void *j)
{
float a, b;
a = *(float *)(i);
b = *(float *)(j);
if (a > b) return (1);
if (a < b) return (-1);
return(0);
}
/* shuffle points into random order */
void shuffle(Points *points)
{
#ifdef PROFILE
double t1 = gettime();
#endif
long i, j;
Point temp;
for (i=0;i<points->num-1;i++) {
j=(lrand48()%(points->num - i)) + i;
temp = points->p[i];
points->p[i] = points->p[j];
points->p[j] = temp;
}
#ifdef PROFILE
double t2 = gettime();
time_shuffle += t2-t1;
#endif
}
/* shuffle an array of integers */
void intshuffle(int *intarray, int length)
{
#ifdef PROFILE
double t1 = gettime();
#endif
long i, j;
int temp;
for (i=0;i<length;i++) {
j=(lrand48()%(length - i))+i;
temp = intarray[i];
intarray[i]=intarray[j];
intarray[j]=temp;
}
#ifdef PROFILE
double t2 = gettime();
time_shuffle += t2-t1;
#endif
}
#ifdef INSERT_WASTE
float waste(float s )
{
for( int i =0 ; i< 4; i++ ) {
s += pow(s,0.78);
}
return s;
}
#endif
/* compute Euclidean distance squared between two points */
float dist(Point p1, Point p2, int dim)
{
int i;
float result=0.0;
for (i=0;i<dim;i++)
result += (p1.coord[i] - p2.coord[i])*(p1.coord[i] - p2.coord[i]);
#ifdef INSERT_WASTE
float s = waste(result);
result += s;
result -= s;
#endif
return(result);
}
/* run speedy on the points, return total cost of solution */
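/* each point starts out assigned to point 0; thread 0 then sweeps the points in
order and opens a new facility at point i with probability min(1, cost(i)/z),
and every time a facility opens all threads re-check their own block and
reassign any point whose weighted distance to the new facility is cheaper;
the returned total is the facility cost z*k plus the sum of assignment costs */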
float pspeedy(Points *points, float z, long *kcenter, int pid, pthread_barrier_t* barrier)
{
#ifdef PROFILE
double t1 = gettime();
#endif
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
//my block
long bsize = points->num/nproc;
long k1 = bsize * pid;
long k2 = k1 + bsize;
if( pid == nproc-1 ) k2 = points->num;
static float totalcost;
static bool open = false;
static float* costs; //cost for each thread.
static int i;
#ifdef ENABLE_THREADS
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
#endif
#ifdef PRINTINFO
if( pid == 0 ){
fprintf(stderr, "Speedy: facility cost %lf\n", z);
}
#endif
/* create center at first point, send it to itself */
for( int k = k1; k < k2; k++ ) {
float distance = dist(points->p[k],points->p[0],points->dim);
points->p[k].cost = distance * points->p[k].weight;
points->p[k].assign=0;
}
if( pid==0 ) {
*kcenter = 1;
costs = (float*)malloc(sizeof(float)*nproc);
}
if( pid != 0 ) { // we are not the master threads. we wait until a center is opened.
while(1) {
#ifdef ENABLE_THREADS
pthread_mutex_lock(&mutex);
while(!open) pthread_cond_wait(&cond,&mutex);
pthread_mutex_unlock(&mutex);
#endif
if( i >= points->num ) break;
for( int k = k1; k < k2; k++ )
{
float distance = dist(points->p[i],points->p[k],points->dim);
if( distance*points->p[k].weight < points->p[k].cost )
{
points->p[k].cost = distance * points->p[k].weight;
points->p[k].assign=i;
}
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
pthread_barrier_wait(barrier);
#endif
}
}
else { // I am the master thread. I decide whether to open a center and notify others if so.
for(i = 1; i < points->num; i++ ) {
bool to_open = ((float)lrand48()/(float)INT_MAX)<(points->p[i].cost/z);
if( to_open ) {
(*kcenter)++;
#ifdef ENABLE_THREADS
pthread_mutex_lock(&mutex);
#endif
open = true;
#ifdef ENABLE_THREADS
pthread_mutex_unlock(&mutex);
pthread_cond_broadcast(&cond);
#endif
for( int k = k1; k < k2; k++ ) {
float distance = dist(points->p[i],points->p[k],points->dim);
if( distance*points->p[k].weight < points->p[k].cost ) {
points->p[k].cost = distance * points->p[k].weight;
points->p[k].assign=i;
}
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
open = false;
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
}
}
#ifdef ENABLE_THREADS
pthread_mutex_lock(&mutex);
#endif
open = true;
#ifdef ENABLE_THREADS
pthread_mutex_unlock(&mutex);
pthread_cond_broadcast(&cond);
#endif
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
open = false;
float mytotal = 0;
for( int k = k1; k < k2; k++ ) {
mytotal += points->p[k].cost;
}
costs[pid] = mytotal;
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
// aggregate costs from each thread
if( pid == 0 )
{
totalcost=z*(*kcenter);
for( int i = 0; i < nproc; i++ )
{
totalcost += costs[i];
}
free(costs);
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
#ifdef PRINTINFO
if( pid == 0 )
{
fprintf(stderr, "Speedy opened %d facilities for total cost %lf\n",
*kcenter, totalcost);
fprintf(stderr, "Distance Cost %lf\n", totalcost - z*(*kcenter));
}
#endif
#ifdef PROFILE
double t2 = gettime();
if( pid== 0 ) {
time_speedy += t2 -t1;
}
#endif
return(totalcost);
}
/* facility location on the points using local search */
/* z is the facility cost, returns the total cost and # of centers */
/* assumes we are seeded with a reasonable solution */
/* cost should represent this solution's cost */
/* halt if there is < e improvement after iter calls to gain */
/* feasible is an array of numfeasible points which may be centers */
float pFL(Points *points, int *feasible, int numfeasible,
float z, long *k, int kmax, float cost, long iter, float e,
int pid, pthread_barrier_t* barrier)
{
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
long i;
long x;
float change;
long numberOfPoints;
change = cost;
/* continue until we run iter iterations without improvement */
/* stop instead if improvement is less than e */
while (change/cost > 1.0*e) {
change = 0.0;
numberOfPoints = points->num;
/* randomize order in which centers are considered */
if( pid == 0 ) {
intshuffle(feasible, numfeasible);
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
for (i=0;i<iter;i++) {
x = i%numfeasible;
change += pgain(feasible[x], points, z, k, kmax, is_center, center_table, switch_membership, isCoordChanged,
&serial_t, &cpu_to_gpu_t, &gpu_to_cpu_t, &alloc_t, &kernel_t, &free_t);
}
cost -= change;
#ifdef PRINTINFO
if( pid == 0 ) {
fprintf(stderr, "%d centers, cost %lf, total distance %lf\n",
*k, cost, cost - z*(*k));
}
#endif
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
}
return(cost);
}
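/* pick the candidate facility locations ("feasible" points) for local search:
at most ITER*kmin*log(kmin) points are drawn, each with probability proportional
to its weight, using a prefix sum of the weights and a binary search; if that
bound is not below the number of points, every point is feasible */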
int selectfeasible_fast(Points *points, int **feasible, int kmin, int pid, pthread_barrier_t* barrier)
{
#ifdef PROFILE
double t1 = gettime();
#endif
int numfeasible = points->num;
if (numfeasible > (ITER*kmin*log((float)kmin)))
numfeasible = (int)(ITER*kmin*log((float)kmin));
*feasible = (int *)malloc(numfeasible*sizeof(int));
float* accumweight;
float totalweight;
/*
Calculate my block.
For now this routine does not seem to be the bottleneck, so it is not parallelized.
When necessary, this can be parallelized by setting k1 and k2 to
proper values and calling this routine from all threads ( it is called only
by thread 0 for now ).
Note that when parallelized, the randomization might not be the same and it might
be difficult to measure the parallel speed-up for the whole program.
*/
// long bsize = numfeasible;
long k1 = 0;
long k2 = numfeasible;
float w;
int l,r,k;
/* not many points, all will be feasible */
if (numfeasible == points->num) {
for (int i=k1;i<k2;i++)
(*feasible)[i] = i;
return numfeasible;
}
accumweight= (float*)malloc(sizeof(float)*points->num);
accumweight[0] = points->p[0].weight;
totalweight=0;
for( int i = 1; i < points->num; i++ ) {
accumweight[i] = accumweight[i-1] + points->p[i].weight;
}
totalweight=accumweight[points->num-1];
for(int i=k1; i<k2; i++ ) {
w = (lrand48()/(float)INT_MAX)*totalweight;
//binary search
l=0;
r=points->num-1;
if( accumweight[0] > w ) {
(*feasible)[i]=0;
continue;
}
while( l+1 < r ) {
k = (l+r)/2;
if( accumweight[k] > w ) {
r = k;
}
else {
l=k;
}
}
(*feasible)[i]=r;
}
free(accumweight);
#ifdef PROFILE
double t2 = gettime();
time_select_feasible += t2-t1;
#endif
return numfeasible;
}
/* compute approximate kmedian on the points */
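/* hiz starts as the cost of serving every point from point 0 and loz at 0;
the facility cost z is then binary-searched in [loz,hiz]: pspeedy seeds a
solution for the current z, pFL refines it, and z is raised when too many
centers open (k > kmax) or lowered when too few open (k < kmin), until k lands
in [kmin,kmax] or the search interval collapses */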
float pkmedian(Points *points, long kmin, long kmax, long* kfinal,
int pid, pthread_barrier_t* barrier )
{
int i;
float cost;
float lastcost;
float hiz, loz, z;
static long k;
static int *feasible;
static int numfeasible;
static float* hizs;
if( pid==0 ) hizs = (float*)calloc(nproc,sizeof(float));
hiz = loz = 0.0;
long numberOfPoints = points->num;
long ptDimension = points->dim;
//my block
long bsize = points->num/nproc;
long k1 = bsize * pid;
long k2 = k1 + bsize;
if( pid == nproc-1 ) k2 = points->num;
#ifdef PRINTINFO
if( pid == 0 )
{
printf("Starting Kmedian procedure\n");
printf("%i points in %i dimensions\n", numberOfPoints, ptDimension);
}
#endif
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
float myhiz = 0;
for (long kk=k1;kk < k2; kk++ ) {
myhiz += dist(points->p[kk], points->p[0],
ptDimension)*points->p[kk].weight;
}
hizs[pid] = myhiz;
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
for( int i = 0; i < nproc; i++ ) {
hiz += hizs[i];
}
loz=0.0; z = (hiz+loz)/2.0;
/* NEW: Check whether more centers than points! */
if (points->num <= kmax) {
/* just return all points as facilities */
for (long kk=k1;kk<k2;kk++) {
points->p[kk].assign = kk;
points->p[kk].cost = 0;
}
cost = 0;
if( pid== 0 ) {
free(hizs);
*kfinal = k;
}
return cost;
}
if( pid == 0 ) shuffle(points);
cost = pspeedy(points, z, &k, pid, barrier);
#ifdef PRINTINFO
if( pid == 0 )
printf("thread %d: Finished first call to speedy, cost=%lf, k=%i\n",pid,cost,k);
#endif
i=0;
/* give speedy SP chances to get at least kmin/2 facilities */
while ((k < kmin)&&(i<SP)) {
cost = pspeedy(points, z, &k, pid, barrier);
i++;
}
#ifdef PRINTINFO
if( pid==0)
printf("thread %d: second call to speedy, cost=%lf, k=%d\n",pid,cost,k);
#endif
/* if still not enough facilities, assume z is too high */
while (k < kmin) {
#ifdef PRINTINFO
if( pid == 0 ) {
printf("%lf %lf\n", loz, hiz);
printf("Speedy indicates we should try lower z\n");
}
#endif
if (i >= SP) {hiz=z; z=(hiz+loz)/2.0; i=0;}
if( pid == 0 ) shuffle(points);
cost = pspeedy(points, z, &k, pid, barrier);
i++;
}
/* now we begin the binary search for real */
/* must designate some points as feasible centers */
/* this creates more consistency between FL runs */
/* helps to guarantee correct # of centers at the end */
if( pid == 0 )
{
numfeasible = selectfeasible_fast(points,&feasible,kmin,pid,barrier);
for( int i = 0; i< points->num; i++ ) {
is_center[points->p[i].assign]= true;
}
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
while(1) {
#ifdef PRINTINFO
if( pid==0 )
{
printf("loz = %lf, hiz = %lf\n", loz, hiz);
printf("Running Local Search...\n");
}
#endif
/* first get a rough estimate on the FL solution */
// pthread_barrier_wait(barrier);
lastcost = cost;
cost = pFL(points, feasible, numfeasible,
z, &k, kmax, cost, (long)(ITER*kmax*log((float)kmax)), 0.1, pid, barrier);
/* if number of centers seems good, try a more accurate FL */
if (((k <= (1.1)*kmax)&&(k >= (0.9)*kmin))||
((k <= kmax+2)&&(k >= kmin-2))) {
#ifdef PRINTINFO
if( pid== 0)
{
printf("Trying a more accurate local search...\n");
}
#endif
/* may need to run a little longer here before halting without
improvement */
cost = pFL(points, feasible, numfeasible,
z, &k, kmax, cost, (long)(ITER*kmax*log((float)kmax)), 0.001, pid, barrier);
}
if (k > kmax) {
/* facilities too cheap */
/* increase facility cost and up the cost accordingly */
loz = z; z = (hiz+loz)/2.0;
cost += (z-loz)*k;
}
if (k < kmin) {
/* facilities too expensive */
/* decrease facility cost and reduce the cost accordingly */
hiz = z; z = (hiz+loz)/2.0;
cost += (z-hiz)*k;
}
/* if k is good, return the result */
/* if we're stuck, just give up and return what we have */
if (((k <= kmax)&&(k >= kmin))||((loz >= (0.999)*hiz)) )
{
break;
}
#ifdef ENABLE_THREADS
pthread_barrier_wait(barrier);
#endif
}
//clean up...
if( pid==0 ) {
free(feasible);
free(hizs);
*kfinal = k;
}
return cost;
}
/* compute the means for the k clusters */
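/* each non-center point folds its coordinates into its assigned center as an
incremental weighted average (relweight = w_point / (w_center + w_point)) and
its weight is accumulated into that center's weight */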
int contcenters(Points *points)
{
long i, ii;
float relweight;
for (i=0;i<points->num;i++) {
/* compute relative weight of this point to the cluster */
if (points->p[i].assign != i) {
relweight=points->p[points->p[i].assign].weight + points->p[i].weight;
relweight = points->p[i].weight/relweight;
for (ii=0;ii<points->dim;ii++) {
points->p[points->p[i].assign].coord[ii]*=1.0-relweight;
points->p[points->p[i].assign].coord[ii]+=
points->p[i].coord[ii]*relweight;
}
points->p[points->p[i].assign].weight += points->p[i].weight;
}
}
return 0;
}
/* copy centers from points to centers */
void copycenters(Points *points, Points* centers, long* centerIDs, long offset)
{
long i;
long k;
bool *is_a_median = (bool *) calloc(points->num, sizeof(bool));
/* mark the centers */
for ( i = 0; i < points->num; i++ ) {
is_a_median[points->p[i].assign] = 1;
}
k=centers->num;
/* count how many */
for ( i = 0; i < points->num; i++ ) {
if ( is_a_median[i] ) {
memcpy( centers->p[k].coord, points->p[i].coord, points->dim * sizeof(float));
centers->p[k].weight = points->p[i].weight;
centerIDs[k] = i + offset;
k++;
}
}
centers->num = k;
free(is_a_median);
}
void* localSearchSub(void* arg_) {
pkmedian_arg_t* arg= (pkmedian_arg_t*)arg_;
pkmedian(arg->points,arg->kmin,arg->kmax,arg->kfinal,arg->pid,arg->barrier);
return NULL;
}
void localSearch( Points* points, long kmin, long kmax, long* kfinal ) {
#ifdef PROFILE
double t1 = gettime();
#endif
pthread_barrier_t barrier;
#ifdef ENABLE_THREADS
pthread_barrier_init(&barrier,NULL,nproc);
#endif
pthread_t* threads = new pthread_t[nproc];
pkmedian_arg_t* arg = new pkmedian_arg_t[nproc];
for( int i = 0; i < nproc; i++ ) {
arg[i].points = points;
arg[i].kmin = kmin;
arg[i].kmax = kmax;
arg[i].pid = i;
arg[i].kfinal = kfinal;
arg[i].barrier = &barrier;
#ifdef ENABLE_THREADS
pthread_create(threads+i,NULL,localSearchSub,(void*)&arg[i]);
#else
localSearchSub(&arg[0]);
#endif
}
for ( int i = 0; i < nproc; i++) {
#ifdef ENABLE_THREADS
pthread_join(threads[i],NULL);
#endif
}
delete[] threads;
delete[] arg;
#ifdef ENABLE_THREADS
pthread_barrier_destroy(&barrier);
#endif
#ifdef PROFILE
double t2 = gettime();
time_local_search += t2-t1;
#endif
}
void outcenterIDs( Points* centers, long* centerIDs, char* outfile ) {
FILE* fp = fopen(outfile, "w");
if( fp==NULL ) {
fprintf(stderr, "error opening %s\n",outfile);
exit(1);
}
int* is_a_median = (int*)calloc( sizeof(int), centers->num );
for( int i =0 ; i< centers->num; i++ ) {
is_a_median[centers->p[i].assign] = 1;
}
for( int i = 0; i < centers->num; i++ ) {
if( is_a_median[i] ) {
fprintf(fp, "%u\n", centerIDs[i]);
fprintf(fp, "%lf\n", centers->p[i].weight);
for( int k = 0; k < centers->dim; k++ ) {
fprintf(fp, "%lf ", centers->p[i].coord[k]);
}
fprintf(fp,"\n\n");
}
}
fclose(fp);
}
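/* streaming driver: read the input in chunks, run localSearch + contcenters on
each chunk, copy the surviving weighted centers into the intermediate center
set, and finally cluster those centers once more before writing the result
with outcenterIDs */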
void streamCluster( PStream* stream,
long kmin, long kmax, int dim,
long chunksize, long centersize, char* outfile )
{
float* block = (float*)malloc( chunksize*dim*sizeof(float) );
float* centerBlock = (float*)malloc(centersize*dim*sizeof(float) );
long* centerIDs = (long*)malloc(centersize*dim*sizeof(long));
if( block == NULL ) {
fprintf(stderr,"not enough memory for a chunk!\n");
exit(1);
}
Points points;
points.dim = dim;
points.num = chunksize;
cudaMallocHost((void **)&points.p,chunksize*sizeof(Point));
// points.p = (Point *)malloc(chunksize*sizeof(Point));
for( int i = 0; i < chunksize; i++ ) {
points.p[i].coord = &block[i*dim];
}
Points centers;
centers.dim = dim;
centers.p = (Point *)malloc(centersize*sizeof(Point));
centers.num = 0;
for( int i = 0; i< centersize; i++ ) {
centers.p[i].coord = ¢erBlock[i*dim];
centers.p[i].weight = 1.0;
}
long IDoffset = 0;
long kfinal;
while(1) {
size_t numRead = stream->read(block, dim, chunksize );
fprintf(stderr,"read %d points\n",numRead);
if( stream->ferror() || numRead < (unsigned int)chunksize && !stream->feof() ) {
fprintf(stderr, "error reading data!\n");
exit(1);
}
points.num = numRead;
for( int i = 0; i < points.num; i++ ) {
points.p[i].weight = 1.0;
}
switch_membership = (bool*)malloc(points.num*sizeof(bool));
is_center = (bool*)calloc(points.num,sizeof(bool));
// center_table = (int*)malloc(points.num*sizeof(int));
cudaMallocHost((void **)¢er_table,points.num*sizeof(int));
localSearch(&points,kmin, kmax,&kfinal);
fprintf(stderr,"finish local search\n");
contcenters(&points);
isCoordChanged = true;
if( kfinal + centers.num > centersize ) {
//here we don't handle the situation where # of centers gets too large.
fprintf(stderr,"oops! no more space for centers\n");
exit(1);
}
#ifdef PRINTINFO
printf("finish cont center\n");
#endif
copycenters(&points, ¢ers, centerIDs, IDoffset);
IDoffset += numRead;
#ifdef PRINTINFO
printf("finish copy centers\n");
#endif
free(is_center);
free(switch_membership);
// free(center_table);
cudaFree(center_table);
if( stream->feof() ) {
break;
}
}
//finally cluster all temp centers
switch_membership = (bool*)malloc(centers.num*sizeof(bool));
is_center = (bool*)calloc(centers.num,sizeof(bool));
// center_table = (int*)malloc(centers.num*sizeof(int));
cudaMallocHost((void **)¢er_table,centers.num*sizeof(int));
localSearch( ¢ers, kmin, kmax ,&kfinal );
contcenters(¢ers);
outcenterIDs( ¢ers, centerIDs, outfile);
}
int main(int argc, char **argv)
{
char *outfilename = new char[MAXNAMESIZE];
char *infilename = new char[MAXNAMESIZE];
long kmin, kmax, n, chunksize, clustersize;
int dim;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif //PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_streamcluster);
#endif
if (argc<10) {
fprintf(stderr,"usage: %s k1 k2 d n chunksize clustersize infile outfile nproc\n",
argv[0]);
fprintf(stderr," k1: Min. number of centers allowed\n");
fprintf(stderr," k2: Max. number of centers allowed\n");
fprintf(stderr," d: Dimension of each data point\n");
fprintf(stderr," n: Number of data points\n");
fprintf(stderr," chunksize: Number of data points to handle per step\n");
fprintf(stderr," clustersize: Maximum number of intermediate centers\n");
fprintf(stderr," infile: Input file (if n<=0)\n");
fprintf(stderr," outfile: Output file\n");
fprintf(stderr," nproc: Number of threads to use\n");
fprintf(stderr,"\n");
fprintf(stderr, "if n > 0, points will be randomly generated instead of reading from infile.\n");
exit(1);
}
kmin = atoi(argv[1]);
kmax = atoi(argv[2]);
dim = atoi(argv[3]);
n = atoi(argv[4]);
chunksize = atoi(argv[5]);
clustersize = atoi(argv[6]);
strcpy(infilename, argv[7]);
strcpy(outfilename, argv[8]);
nproc = atoi(argv[9]);
srand48(SEED);
PStream* stream;
if( n > 0 ) {
stream = new SimStream(n);
}
else {
stream = new FileStream(infilename);
}
double t1 = gettime();
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
serial_t = 0.0;
cpu_to_gpu_t = 0.0;
gpu_to_cpu_t = 0.0;
alloc_t = 0.0;
free_t = 0.0;
kernel_t = 0.0;
isCoordChanged = false;
streamCluster(stream, kmin, kmax, dim, chunksize, clustersize, outfilename );
freeDevMem();
freeHostMem();
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
double t2 = gettime();
printf("time = %lfs\n",t2-t1);
delete stream;
#ifdef PROFILE
printf("time pgain = %lfs\n", time_gain);
printf("time pgain_dist = %lfs\n", time_gain_dist);
printf("time pgain_init = %lfs\n", time_gain_init);
printf("time pselect = %lfs\n", time_select_feasible);
printf("time pspeedy = %lfs\n", time_speedy);
printf("time pshuffle = %lfs\n", time_shuffle);
printf("time localSearch = %lfs\n", time_local_search);
printf("\n\n");
printf("====CUDA Timing info (pgain)====\n");
printf("time serial = %lfs\n", serial_t/1000);
printf("time CPU to GPU memory copy = %lfs\n", cpu_to_gpu_t/1000);
printf("time GPU to CPU memory copy back = %lfs\n", gpu_to_cpu_t/1000);
printf("time GPU malloc = %lfs\n", alloc_t/1000);
printf("time GPU free = %lfs\n", free_t/1000);
printf("time kernel = %lfs\n", kernel_t/1000);
#endif
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
6439ac0dfcc1473cbb748d8249893d4acce71e8f.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFBase.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/IVFAppend.cuh>
#include <faiss/gpu/impl/RemapIndices.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <limits>
#include <thrust/host_vector.h>
#include <unordered_map>
namespace faiss { namespace gpu {
IVFBase::IVFBase(GpuResources* resources,
FlatIndex* quantizer,
int bytesPerVector,
IndicesOptions indicesOptions,
MemorySpace space) :
resources_(resources),
quantizer_(quantizer),
bytesPerVector_(bytesPerVector),
indicesOptions_(indicesOptions),
space_(space),
dim_(quantizer->getDim()),
numLists_(quantizer->getSize()),
maxListLength_(0) {
reset();
}
IVFBase::~IVFBase() {
}
void
IVFBase::reserveMemory(size_t numVecs) {
size_t vecsPerList = numVecs / deviceListData_.size();
if (vecsPerList < 1) {
return;
}
auto stream = resources_->getDefaultStreamCurrentDevice();
size_t bytesPerDataList = vecsPerList * bytesPerVector_;
for (auto& list : deviceListData_) {
list->reserve(bytesPerDataList, stream);
}
if ((indicesOptions_ == INDICES_32_BIT) ||
(indicesOptions_ == INDICES_64_BIT)) {
// Reserve for index lists as well
size_t bytesPerIndexList = vecsPerList *
(indicesOptions_ == INDICES_32_BIT ? sizeof(int) : sizeof(long));
for (auto& list : deviceListIndices_) {
list->reserve(bytesPerIndexList, stream);
}
}
// Update device info for all lists, since the base pointers may
// have changed
updateDeviceListInfo_(stream);
}
void
IVFBase::reset() {
deviceListData_.clear();
deviceListIndices_.clear();
deviceListDataPointers_.clear();
deviceListIndexPointers_.clear();
deviceListLengths_.clear();
listOffsetToUserIndex_.clear();
for (size_t i = 0; i < numLists_; ++i) {
deviceListData_.emplace_back(
std::unique_ptr<DeviceVector<unsigned char>>(
new DeviceVector<unsigned char>(space_)));
deviceListIndices_.emplace_back(
std::unique_ptr<DeviceVector<unsigned char>>(
new DeviceVector<unsigned char>(space_)));
listOffsetToUserIndex_.emplace_back(std::vector<long>());
}
deviceListDataPointers_.resize(numLists_, nullptr);
deviceListIndexPointers_.resize(numLists_, nullptr);
deviceListLengths_.resize(numLists_, 0);
maxListLength_ = 0;
}
int
IVFBase::getDim() const {
return dim_;
}
size_t
IVFBase::reclaimMemory() {
// Reclaim all unused memory exactly
return reclaimMemory_(true);
}
size_t
IVFBase::reclaimMemory_(bool exact) {
auto stream = resources_->getDefaultStreamCurrentDevice();
size_t totalReclaimed = 0;
for (int i = 0; i < deviceListData_.size(); ++i) {
auto& data = deviceListData_[i];
totalReclaimed += data->reclaim(exact, stream);
deviceListDataPointers_[i] = data->data();
}
for (int i = 0; i < deviceListIndices_.size(); ++i) {
auto& indices = deviceListIndices_[i];
totalReclaimed += indices->reclaim(exact, stream);
deviceListIndexPointers_[i] = indices->data();
}
// Update device info for all lists, since the base pointers may
// have changed
updateDeviceListInfo_(stream);
return totalReclaimed;
}
void
IVFBase::updateDeviceListInfo_(hipStream_t stream) {
std::vector<int> listIds(deviceListData_.size());
for (int i = 0; i < deviceListData_.size(); ++i) {
listIds[i] = i;
}
updateDeviceListInfo_(listIds, stream);
}
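// Gather the current length (in vectors), data pointer and index pointer of
// the given lists on the host, copy those small arrays to the device, and let
// runUpdateListPointers patch the device-side bookkeeping arrays in one pass.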
void
IVFBase::updateDeviceListInfo_(const std::vector<int>& listIds,
hipStream_t stream) {
auto& mem = resources_->getMemoryManagerCurrentDevice();
HostTensor<int, 1, true>
hostListsToUpdate({(int) listIds.size()});
HostTensor<int, 1, true>
hostNewListLength({(int) listIds.size()});
HostTensor<void*, 1, true>
hostNewDataPointers({(int) listIds.size()});
HostTensor<void*, 1, true>
hostNewIndexPointers({(int) listIds.size()});
for (int i = 0; i < listIds.size(); ++i) {
auto listId = listIds[i];
auto& data = deviceListData_[listId];
auto& indices = deviceListIndices_[listId];
hostListsToUpdate[i] = listId;
hostNewListLength[i] = data->size() / bytesPerVector_;
hostNewDataPointers[i] = data->data();
hostNewIndexPointers[i] = indices->data();
}
// Copy the above update sets to the GPU
DeviceTensor<int, 1, true> listsToUpdate(
mem, hostListsToUpdate, stream);
DeviceTensor<int, 1, true> newListLength(
mem, hostNewListLength, stream);
DeviceTensor<void*, 1, true> newDataPointers(
mem, hostNewDataPointers, stream);
DeviceTensor<void*, 1, true> newIndexPointers(
mem, hostNewIndexPointers, stream);
// Update all pointers to the lists on the device that may have
// changed
runUpdateListPointers(listsToUpdate,
newListLength,
newDataPointers,
newIndexPointers,
deviceListLengths_,
deviceListDataPointers_,
deviceListIndexPointers_,
stream);
}
size_t
IVFBase::getNumLists() const {
return numLists_;
}
int
IVFBase::getListLength(int listId) const {
FAISS_ASSERT(listId < deviceListLengths_.size());
return deviceListLengths_[listId];
}
std::vector<long>
IVFBase::getListIndices(int listId) const {
FAISS_ASSERT(listId < numLists_);
if (indicesOptions_ == INDICES_32_BIT) {
FAISS_ASSERT(listId < deviceListIndices_.size());
auto intInd = deviceListIndices_[listId]->copyToHost<int>(
resources_->getDefaultStreamCurrentDevice());
std::vector<long> out(intInd.size());
for (size_t i = 0; i < intInd.size(); ++i) {
out[i] = (long) intInd[i];
}
return out;
} else if (indicesOptions_ == INDICES_64_BIT) {
FAISS_ASSERT(listId < deviceListIndices_.size());
return deviceListIndices_[listId]->copyToHost<long>(
resources_->getDefaultStreamCurrentDevice());
} else if (indicesOptions_ == INDICES_CPU) {
FAISS_ASSERT(listId < deviceListData_.size());
FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
auto& userIds = listOffsetToUserIndex_[listId];
FAISS_ASSERT(userIds.size() ==
deviceListData_[listId]->size() / bytesPerVector_);
// this will return a copy
return userIds;
} else {
// unhandled indices type (includes INDICES_IVF)
FAISS_ASSERT(false);
return std::vector<long>();
}
}
std::vector<unsigned char>
IVFBase::getListVectors(int listId) const {
FAISS_ASSERT(listId < deviceListData_.size());
auto& list = *deviceListData_[listId];
auto stream = resources_->getDefaultStreamCurrentDevice();
return list.copyToHost<unsigned char>(stream);
}
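// Append the user-provided ids for one inverted list. Depending on
// indicesOptions_ they are stored on the GPU as int32 (with a range check) or
// int64, kept in the CPU-side listOffsetToUserIndex_ table, or not stored at
// all (INDICES_IVF); if the append reallocated the device vector, the cached
// device pointer for this list is refreshed.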
void
IVFBase::addIndicesFromCpu_(int listId,
const long* indices,
size_t numVecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
auto& listIndices = deviceListIndices_[listId];
auto prevIndicesData = listIndices->data();
if (indicesOptions_ == INDICES_32_BIT) {
// Make sure that all indices are in bounds
std::vector<int> indices32(numVecs);
for (size_t i = 0; i < numVecs; ++i) {
auto ind = indices[i];
FAISS_ASSERT(ind <= (long) std::numeric_limits<int>::max());
indices32[i] = (int) ind;
}
listIndices->append((unsigned char*) indices32.data(),
numVecs * sizeof(int),
stream,
true /* exact reserved size */);
} else if (indicesOptions_ == INDICES_64_BIT) {
listIndices->append((unsigned char*) indices,
numVecs * sizeof(long),
stream,
true /* exact reserved size */);
} else if (indicesOptions_ == INDICES_CPU) {
// indices are stored on the CPU
FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[listId];
userIndices.insert(userIndices.begin(), indices, indices + numVecs);
} else {
// indices are not stored
FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
}
if (prevIndicesData != listIndices->data()) {
deviceListIndexPointers_[listId] = listIndices->data();
}
}
} } // namespace
|
6439ac0dfcc1473cbb748d8249893d4acce71e8f.cu
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFBase.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/IVFAppend.cuh>
#include <faiss/gpu/impl/RemapIndices.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <limits>
#include <thrust/host_vector.h>
#include <unordered_map>
namespace faiss { namespace gpu {
IVFBase::IVFBase(GpuResources* resources,
FlatIndex* quantizer,
int bytesPerVector,
IndicesOptions indicesOptions,
MemorySpace space) :
resources_(resources),
quantizer_(quantizer),
bytesPerVector_(bytesPerVector),
indicesOptions_(indicesOptions),
space_(space),
dim_(quantizer->getDim()),
numLists_(quantizer->getSize()),
maxListLength_(0) {
reset();
}
IVFBase::~IVFBase() {
}
void
IVFBase::reserveMemory(size_t numVecs) {
size_t vecsPerList = numVecs / deviceListData_.size();
if (vecsPerList < 1) {
return;
}
auto stream = resources_->getDefaultStreamCurrentDevice();
size_t bytesPerDataList = vecsPerList * bytesPerVector_;
for (auto& list : deviceListData_) {
list->reserve(bytesPerDataList, stream);
}
if ((indicesOptions_ == INDICES_32_BIT) ||
(indicesOptions_ == INDICES_64_BIT)) {
// Reserve for index lists as well
size_t bytesPerIndexList = vecsPerList *
(indicesOptions_ == INDICES_32_BIT ? sizeof(int) : sizeof(long));
for (auto& list : deviceListIndices_) {
list->reserve(bytesPerIndexList, stream);
}
}
// Update device info for all lists, since the base pointers may
// have changed
updateDeviceListInfo_(stream);
}
void
IVFBase::reset() {
deviceListData_.clear();
deviceListIndices_.clear();
deviceListDataPointers_.clear();
deviceListIndexPointers_.clear();
deviceListLengths_.clear();
listOffsetToUserIndex_.clear();
for (size_t i = 0; i < numLists_; ++i) {
deviceListData_.emplace_back(
std::unique_ptr<DeviceVector<unsigned char>>(
new DeviceVector<unsigned char>(space_)));
deviceListIndices_.emplace_back(
std::unique_ptr<DeviceVector<unsigned char>>(
new DeviceVector<unsigned char>(space_)));
listOffsetToUserIndex_.emplace_back(std::vector<long>());
}
deviceListDataPointers_.resize(numLists_, nullptr);
deviceListIndexPointers_.resize(numLists_, nullptr);
deviceListLengths_.resize(numLists_, 0);
maxListLength_ = 0;
}
int
IVFBase::getDim() const {
return dim_;
}
size_t
IVFBase::reclaimMemory() {
// Reclaim all unused memory exactly
return reclaimMemory_(true);
}
size_t
IVFBase::reclaimMemory_(bool exact) {
auto stream = resources_->getDefaultStreamCurrentDevice();
size_t totalReclaimed = 0;
for (int i = 0; i < deviceListData_.size(); ++i) {
auto& data = deviceListData_[i];
totalReclaimed += data->reclaim(exact, stream);
deviceListDataPointers_[i] = data->data();
}
for (int i = 0; i < deviceListIndices_.size(); ++i) {
auto& indices = deviceListIndices_[i];
totalReclaimed += indices->reclaim(exact, stream);
deviceListIndexPointers_[i] = indices->data();
}
// Update device info for all lists, since the base pointers may
// have changed
updateDeviceListInfo_(stream);
return totalReclaimed;
}
void
IVFBase::updateDeviceListInfo_(cudaStream_t stream) {
std::vector<int> listIds(deviceListData_.size());
for (int i = 0; i < deviceListData_.size(); ++i) {
listIds[i] = i;
}
updateDeviceListInfo_(listIds, stream);
}
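// Gather the current length (in vectors), data pointer and index pointer of
// the given lists on the host, copy those small arrays to the device, and let
// runUpdateListPointers patch the device-side bookkeeping arrays in one pass.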
void
IVFBase::updateDeviceListInfo_(const std::vector<int>& listIds,
cudaStream_t stream) {
auto& mem = resources_->getMemoryManagerCurrentDevice();
HostTensor<int, 1, true>
hostListsToUpdate({(int) listIds.size()});
HostTensor<int, 1, true>
hostNewListLength({(int) listIds.size()});
HostTensor<void*, 1, true>
hostNewDataPointers({(int) listIds.size()});
HostTensor<void*, 1, true>
hostNewIndexPointers({(int) listIds.size()});
for (int i = 0; i < listIds.size(); ++i) {
auto listId = listIds[i];
auto& data = deviceListData_[listId];
auto& indices = deviceListIndices_[listId];
hostListsToUpdate[i] = listId;
hostNewListLength[i] = data->size() / bytesPerVector_;
hostNewDataPointers[i] = data->data();
hostNewIndexPointers[i] = indices->data();
}
// Copy the above update sets to the GPU
DeviceTensor<int, 1, true> listsToUpdate(
mem, hostListsToUpdate, stream);
DeviceTensor<int, 1, true> newListLength(
mem, hostNewListLength, stream);
DeviceTensor<void*, 1, true> newDataPointers(
mem, hostNewDataPointers, stream);
DeviceTensor<void*, 1, true> newIndexPointers(
mem, hostNewIndexPointers, stream);
// Update all pointers to the lists on the device that may have
// changed
runUpdateListPointers(listsToUpdate,
newListLength,
newDataPointers,
newIndexPointers,
deviceListLengths_,
deviceListDataPointers_,
deviceListIndexPointers_,
stream);
}
size_t
IVFBase::getNumLists() const {
return numLists_;
}
int
IVFBase::getListLength(int listId) const {
FAISS_ASSERT(listId < deviceListLengths_.size());
return deviceListLengths_[listId];
}
std::vector<long>
IVFBase::getListIndices(int listId) const {
FAISS_ASSERT(listId < numLists_);
if (indicesOptions_ == INDICES_32_BIT) {
FAISS_ASSERT(listId < deviceListIndices_.size());
auto intInd = deviceListIndices_[listId]->copyToHost<int>(
resources_->getDefaultStreamCurrentDevice());
std::vector<long> out(intInd.size());
for (size_t i = 0; i < intInd.size(); ++i) {
out[i] = (long) intInd[i];
}
return out;
} else if (indicesOptions_ == INDICES_64_BIT) {
FAISS_ASSERT(listId < deviceListIndices_.size());
return deviceListIndices_[listId]->copyToHost<long>(
resources_->getDefaultStreamCurrentDevice());
} else if (indicesOptions_ == INDICES_CPU) {
FAISS_ASSERT(listId < deviceListData_.size());
FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
auto& userIds = listOffsetToUserIndex_[listId];
FAISS_ASSERT(userIds.size() ==
deviceListData_[listId]->size() / bytesPerVector_);
// this will return a copy
return userIds;
} else {
// unhandled indices type (includes INDICES_IVF)
FAISS_ASSERT(false);
return std::vector<long>();
}
}
std::vector<unsigned char>
IVFBase::getListVectors(int listId) const {
FAISS_ASSERT(listId < deviceListData_.size());
auto& list = *deviceListData_[listId];
auto stream = resources_->getDefaultStreamCurrentDevice();
return list.copyToHost<unsigned char>(stream);
}
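// Append the user-provided ids for one inverted list. Depending on
// indicesOptions_ they are stored on the GPU as int32 (with a range check) or
// int64, kept in the CPU-side listOffsetToUserIndex_ table, or not stored at
// all (INDICES_IVF); if the append reallocated the device vector, the cached
// device pointer for this list is refreshed.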
void
IVFBase::addIndicesFromCpu_(int listId,
const long* indices,
size_t numVecs) {
auto stream = resources_->getDefaultStreamCurrentDevice();
auto& listIndices = deviceListIndices_[listId];
auto prevIndicesData = listIndices->data();
if (indicesOptions_ == INDICES_32_BIT) {
// Make sure that all indices are in bounds
std::vector<int> indices32(numVecs);
for (size_t i = 0; i < numVecs; ++i) {
auto ind = indices[i];
FAISS_ASSERT(ind <= (long) std::numeric_limits<int>::max());
indices32[i] = (int) ind;
}
listIndices->append((unsigned char*) indices32.data(),
numVecs * sizeof(int),
stream,
true /* exact reserved size */);
} else if (indicesOptions_ == INDICES_64_BIT) {
listIndices->append((unsigned char*) indices,
numVecs * sizeof(long),
stream,
true /* exact reserved size */);
} else if (indicesOptions_ == INDICES_CPU) {
// indices are stored on the CPU
FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[listId];
userIndices.insert(userIndices.begin(), indices, indices + numVecs);
} else {
// indices are not stored
FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
}
if (prevIndicesData != listIndices->data()) {
deviceListIndexPointers_[listId] = listIndices->data();
}
}
} } // namespace
|
c36fc4fce837508acacdb2e8763fd8d71588fd8c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* TruncateThresholdFilter.cpp
*
* Created on: Sep 17, 2012
* Author: avo
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include "point_info.hpp"
#include "TruncateThresholdFilter.h"
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
#include <stdio.h>
namespace device
{
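// One thread per pixel (blockIdx.z selects the view): a point whose depth z
// lies inside [min,max] is tagged Foreground, anything outside is tagged
// Background, and the tag is packed back into the w component of the point
// via setSegmentationPointInfo.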
struct ThresholdTruncator
{
float4* pos;
float min;
float max;
__device__ __forceinline__ void
operator () () const
{
int sx = blockIdx.x*blockDim.x+threadIdx.x;
int sy = blockIdx.y*blockDim.y+threadIdx.y;
int off = blockIdx.z*640*480+sy*640+sx;
// if(blockIdx.x==10&&blockIdx.y==10&&threadIdx.x==10&&threadIdx.y==10)
// printf(" %f %f %f \n",pos[off].x,pos[off].y,pos[off].z);
float4 wc = pos[off];
SegmentationPointInfo spi;
spi = Foreground;
if(wc.z < min || wc.z > max)
spi = Background;
device::setSegmentationPointInfo(wc.w,spi);
pos[off] = wc;
}
};
__global__ void filterTruncateThreshold(ThresholdTruncator tt) { tt (); }
}
device::ThresholdTruncator truncator;
template <typename T>
struct clamp : public thrust::unary_function<T,T>
{
T lo, hi;
__host__ __device__
clamp(T _lo, T _hi) : lo(_lo), hi(_hi) {}
__host__ __device__
T operator()(T x)
{
if (x < lo)
return lo;
else if (x < hi)
return x;
else
return hi;
}
};
void TruncateThresholdFilter::init()
{
truncator.pos = (float4 *)getInputDataPointer(0);
truncator.min = min;
truncator.max = max;
block = dim3(32,24);
grid = dim3(640/block.x,480/block.y,n_view);
}
void TruncateThresholdFilter::execute()
{
hipLaunchKernelGGL(( device::filterTruncateThreshold), dim3(grid),dim3(block), 0, 0, truncator);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
size_t uc4s = 640*480*sizeof(uchar4);
char path[50];
for(int v=0;v<n_view;v++)
{
float4 *h_f4_depth = (float4 *)malloc(640*480*sizeof(float4));
checkCudaErrors(hipMemcpy(h_f4_depth,truncator.pos+v*640*480,640*480*sizeof(float4),hipMemcpyDeviceToHost));
// uchar4 *h_uc4_depth = (uchar4 *)malloc(uc4s);
// for(int i=0;i<640*480;i++)
// {
// unsigned char g = h_f4_depth[i].z/20;
// h_uc4_depth[i] = make_uchar4(g,g,g,128);
//
// if(!device::isValid(h_f4_depth[i].w)) h_uc4_depth[i].x = 255;
//
// if(device::isReconstructed(h_f4_depth[i].w)) h_uc4_depth[i].y = 255;
// }
//
// sprintf(path,"/home/avo/pcds/src_depth_valid_map%d.ppm",0);
// sdkSavePPM4ub(path,(unsigned char*)h_uc4_depth,640,480);
uchar4 *h_uc4_depth2 = (uchar4 *)malloc(uc4s);
for(int i=0;i<640*480;i++)
{
unsigned char g = h_f4_depth[i].z/20;
h_uc4_depth2[i] = make_uchar4(g,g,g,128);
if(device::isForeground(h_f4_depth[i].w)) h_uc4_depth2[i].x = 255;
if(device::isBackground(h_f4_depth[i].w)) h_uc4_depth2[i].y = 255;
if(!device::isSegmented(h_f4_depth[i].w)) h_uc4_depth2[i].z = 255;
}
sprintf(path,"/home/avo/pcds/src_segmented_map%d.ppm",v);
sdkSavePPM4ub(path,(unsigned char*)h_uc4_depth2,640,480);
}
}
|
c36fc4fce837508acacdb2e8763fd8d71588fd8c.cu
|
/*
* TruncateThresholdFilter.cpp
*
* Created on: Sep 17, 2012
* Author: avo
*/
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include "point_info.hpp"
#include "TruncateThresholdFilter.h"
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
#include <stdio.h>
namespace device
{
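// One thread per pixel (blockIdx.z selects the view): a point whose depth z
// lies inside [min,max] is tagged Foreground, anything outside is tagged
// Background, and the tag is packed back into the w component of the point
// via setSegmentationPointInfo.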
struct ThresholdTruncator
{
float4* pos;
float min;
float max;
__device__ __forceinline__ void
operator () () const
{
int sx = blockIdx.x*blockDim.x+threadIdx.x;
int sy = blockIdx.y*blockDim.y+threadIdx.y;
int off = blockIdx.z*640*480+sy*640+sx;
// if(blockIdx.x==10&&blockIdx.y==10&&threadIdx.x==10&&threadIdx.y==10)
// printf(" %f %f %f \n",pos[off].x,pos[off].y,pos[off].z);
float4 wc = pos[off];
SegmentationPointInfo spi;
spi = Foreground;
if(wc.z < min || wc.z > max)
spi = Background;
device::setSegmentationPointInfo(wc.w,spi);
pos[off] = wc;
}
};
__global__ void filterTruncateThreshold(ThresholdTruncator tt) { tt (); }
}
device::ThresholdTruncator truncator;
template <typename T>
struct clamp : public thrust::unary_function<T,T>
{
T lo, hi;
__host__ __device__
clamp(T _lo, T _hi) : lo(_lo), hi(_hi) {}
__host__ __device__
T operator()(T x)
{
if (x < lo)
return lo;
else if (x < hi)
return x;
else
return hi;
}
};
void TruncateThresholdFilter::init()
{
truncator.pos = (float4 *)getInputDataPointer(0);
truncator.min = min;
truncator.max = max;
block = dim3(32,24);
grid = dim3(640/block.x,480/block.y,n_view);
}
void TruncateThresholdFilter::execute()
{
device::filterTruncateThreshold<<<grid,block>>>(truncator);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
size_t uc4s = 640*480*sizeof(uchar4);
char path[50];
for(int v=0;v<n_view;v++)
{
float4 *h_f4_depth = (float4 *)malloc(640*480*sizeof(float4));
checkCudaErrors(cudaMemcpy(h_f4_depth,truncator.pos+v*640*480,640*480*sizeof(float4),cudaMemcpyDeviceToHost));
// uchar4 *h_uc4_depth = (uchar4 *)malloc(uc4s);
// for(int i=0;i<640*480;i++)
// {
// unsigned char g = h_f4_depth[i].z/20;
// h_uc4_depth[i] = make_uchar4(g,g,g,128);
//
// if(!device::isValid(h_f4_depth[i].w)) h_uc4_depth[i].x = 255;
//
// if(device::isReconstructed(h_f4_depth[i].w)) h_uc4_depth[i].y = 255;
// }
//
// sprintf(path,"/home/avo/pcds/src_depth_valid_map%d.ppm",0);
// sdkSavePPM4ub(path,(unsigned char*)h_uc4_depth,640,480);
uchar4 *h_uc4_depth2 = (uchar4 *)malloc(uc4s);
for(int i=0;i<640*480;i++)
{
unsigned char g = h_f4_depth[i].z/20;
h_uc4_depth2[i] = make_uchar4(g,g,g,128);
if(device::isForeground(h_f4_depth[i].w)) h_uc4_depth2[i].x = 255;
if(device::isBackground(h_f4_depth[i].w)) h_uc4_depth2[i].y = 255;
if(!device::isSegmented(h_f4_depth[i].w)) h_uc4_depth2[i].z = 255;
}
sprintf(path,"/home/avo/pcds/src_segmented_map%d.ppm",v);
sdkSavePPM4ub(path,(unsigned char*)h_uc4_depth2,640,480);
}
}
|
3705a7e0470c78254e91c4f600d452ba0b148853.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Histogram Equalization
#include <wb.h>
#define HISTOGRAM_LENGTH 256
#define THREADS 256
#define BLOCK_SIZE 256
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
//@@ insert code here
/* casts the input to unsigned char while also converting to grayscale*/
__global__ void FloatToUnsignedChar(float* input, unsigned char* uchar, unsigned char* grayImage, int height, int width)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = blockIdx.x*blockDim.x + tx; //column index
int row = blockIdx.y*blockDim.y + ty; //row index
int idx = row*width + col; //compute current position --similar to first few mps
unsigned char r, g, b;
//extract r, g, and b values while simultaneously casting to unsigned char
if (row < height && col < width)
{
uchar[3 * idx] = r = (unsigned char)(255 * input[3 * idx]);
uchar[3 * idx + 1] = g = (unsigned char)(255 * input[3 * idx + 1]);
uchar[3 * idx + 2] = b = (unsigned char)(255 * input[3 * idx + 2]);
//convert to grayscale
grayImage[idx] = (unsigned char)(0.21*r + 0.71*g + 0.07*b);
}
}
__global__ void ComputeHistogram(unsigned char* grayImage, int* histogram, int size)
{
//set up shared memory
__shared__ int histogram_private[HISTOGRAM_LENGTH];
int tx = threadIdx.x;
int i = blockIdx.x*blockDim.x + tx;
if (tx < HISTOGRAM_LENGTH)
{
histogram_private[tx] = 0;
}
__syncthreads();
//compute private histogram
int stride = blockDim.x*gridDim.x;
while (i < size)
{
atomicAdd(&(histogram_private[grayImage[i]]), 1);
i += stride;
}
__syncthreads();
//add to the public histogram
if (tx < HISTOGRAM_LENGTH)
{
atomicAdd(&(histogram[tx]), histogram_private[tx]);
}
}
/*modified scan operation code from mp5*/
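/* single-block Brent-Kung style scan over the 256 histogram bins: each count is
first converted to a probability (count/size), the reduction and post-reduction
reverse phases then build the inclusive prefix sum, which is written out as the
CDF */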
__global__ void scan(int *histogram, float *cdf, int size)
{
__shared__ float partialSum[2*BLOCK_SIZE];
int tx = threadIdx.x;
//@@ Load a segment of the input vector into shared memory
//each thread loads 2 elements into shared memory. Put the identity (0 for sum) in if outside of boundary
if (tx < HISTOGRAM_LENGTH)
{
partialSum[tx] = float(float(histogram[tx]) / (size)); //compute probability
}
else
{
partialSum[tx] = 0;
partialSum[tx+BLOCK_SIZE] = 0;
}
__syncthreads();
//printf(" %d ", histogram[tx]);
/*reduction phase*/
for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2)
{
int index = (tx + 1)*stride * 2 - 1;
if (index < 2 * BLOCK_SIZE)
{
partialSum[index] += partialSum[index - stride];
}
__syncthreads();
}
/*post reduction reverse phase*/
for (int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2)
{
__syncthreads();
int index = (tx + 1)*stride * 2 - 1;
if (index + stride < 2 * BLOCK_SIZE)
{
partialSum[index + stride] += partialSum[index];
}
}
__syncthreads(); //partialSum now holds the cdf
//write to output
if (tx < HISTOGRAM_LENGTH)
{
cdf[tx] = partialSum[tx];
}
}
/*compute and apply the histogram equalization then cast back to float*/
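/* per pixel and per channel the correction is
clamp(255 * (cdf[v] - cdfmin) / (1 - cdfmin), 0, 255) with cdfmin = cdf[0];
the corrected unsigned char value is then rescaled back to [0,1] as a float */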
__global__ void histogram_equalization(unsigned char* uchar, float* cdf, float* output, int height, int width)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = blockIdx.x*blockDim.x + tx; //column index
int row = blockIdx.y*blockDim.y + ty; //row index
int idx = row*width + col; //compute current position --similar to first few mps
float cdfmin = cdf[0]; //extract cdf min
if (row < height && col < width)
{
//apply histogram equalization function
uchar[3 * idx] = min(max(255 * (cdf[uchar[3 * idx]] - cdfmin) / (1 - cdfmin), 0.0), 255.0);
uchar[3 * idx + 1] = min(max(255 * (cdf[uchar[3 * idx + 1]] - cdfmin) / (1 - cdfmin), 0.0), 255.0);
uchar[3 * idx + 2] = min(max(255 * (cdf[uchar[3 * idx + 2]] - cdfmin) / (1 - cdfmin), 0.0), 255.0);
//cast back to float
output[3 * idx] = float(uchar[3 * idx] / 255.0);
output[3 * idx + 1] = float(uchar[3 * idx + 1] / 255.0);
output[3 * idx + 2] = float(uchar[3 * idx + 2] / 255.0);
}
}
int main(int argc, char **argv) {
wbArg_t args;
int imageWidth;
int imageHeight;
int imageChannels;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
const char *inputImageFile;
float* deviceInput;
float* deviceOutput;
unsigned char* deviceUchar;
unsigned char* deviceGrayImage;
int* deviceHistogram;
float* deviceCDF;
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
wbTime_start(Generic, "Importing data and creating memory on host");
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
hostInputImageData = wbImage_getData(inputImage); //added this
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
wbTime_stop(Generic, "Importing data and creating memory on host");
hostOutputImageData = (float *)malloc(imageWidth*imageHeight*imageChannels * sizeof(float));
wbCheck(hipMalloc(&deviceInput, (imageWidth*imageHeight*imageChannels*sizeof(float))));
wbCheck(hipMalloc(&deviceOutput, (imageWidth*imageHeight*imageChannels*sizeof(float))));
wbCheck(hipMalloc(&deviceUchar, (imageWidth*imageHeight*imageChannels*sizeof(unsigned char))));
wbCheck(hipMalloc(&deviceGrayImage, (imageWidth*imageHeight*sizeof(unsigned char))));
wbCheck(hipMalloc(&deviceHistogram, (HISTOGRAM_LENGTH*sizeof(int))));
wbCheck(hipMalloc(&deviceCDF, (HISTOGRAM_LENGTH*sizeof(float))));
wbCheck(hipMemset(deviceHistogram, 0, HISTOGRAM_LENGTH * sizeof(int))); //initialize histogram to 0
wbCheck(hipMemset(deviceCDF, 0, HISTOGRAM_LENGTH * sizeof(float)));
wbCheck(hipMemcpy(deviceInput, hostInputImageData, (imageWidth*imageHeight*imageChannels*sizeof(float)), hipMemcpyHostToDevice));
//use square root of 256 for number of threads in each direction
dim3 grid((imageWidth-1)/16 + 1, (imageHeight - 1) / 16 + 1, 1);
dim3 threads(16, 16, 1);
//cast to unsigned char and convert to grayscale
hipLaunchKernelGGL(( FloatToUnsignedChar), dim3(grid), dim3(threads), 0, 0, deviceInput, deviceUchar, deviceGrayImage, imageHeight, imageWidth);
dim3 grid2((imageWidth*imageHeight - 1) / THREADS + 1, 1, 1);
dim3 threads2(THREADS, 1, 1);
//compute the histogram
hipLaunchKernelGGL(( ComputeHistogram), dim3(grid2), dim3(threads2), 0, 0, deviceGrayImage, deviceHistogram, imageHeight*imageWidth);
dim3 grid3(1, 1, 1);
//compute the cdf
hipLaunchKernelGGL(( scan), dim3(grid3), dim3(threads2), 0, 0, deviceHistogram, deviceCDF, imageHeight*imageWidth);
//printf("size: %d", imageHeight*imageWidth);
//find and apply the histogram equalization
hipLaunchKernelGGL(( histogram_equalization), dim3(grid), dim3(threads) , 0, 0, deviceUchar, deviceCDF, deviceOutput, imageHeight, imageWidth);
hipError_t code = hipGetLastError();
if (code != hipSuccess) { printf("CUDA error: %s\n", hipGetErrorString(code)); }
wbCheck(hipMemcpy(hostOutputImageData, deviceOutput, imageHeight*imageWidth*imageChannels*sizeof(float), hipMemcpyDeviceToHost));
wbImage_setData(outputImage, hostOutputImageData); //added this
wbSolution(args, outputImage);
//@@ insert code here
//free memory
hipFree(deviceInput);
hipFree(deviceOutput);
hipFree(deviceHistogram);
hipFree(deviceCDF);
hipFree(deviceUchar);
hipFree(deviceGrayImage);
free(hostInputImageData);
free(hostOutputImageData);
return 0;
}
|
3705a7e0470c78254e91c4f600d452ba0b148853.cu
|
// Histogram Equalization
#include <wb.h>
#define HISTOGRAM_LENGTH 256
#define THREADS 256
#define BLOCK_SIZE 256
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
//@@ insert code here
/* casts the input to unsigned char while also converting to grayscale*/
__global__ void FloatToUnsignedChar(float* input, unsigned char* uchar, unsigned char* grayImage, int height, int width)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = blockIdx.x*blockDim.x + tx; //column index
int row = blockIdx.y*blockDim.y + ty; //row index
int idx = row*width + col; //compute current position --similar to first few mps
unsigned char r, g, b;
//extract r, g, and b values while simultaneously casting to unsigned char
if (row < height && col < width)
{
uchar[3 * idx] = r = (unsigned char)(255 * input[3 * idx]);
uchar[3 * idx + 1] = g = (unsigned char)(255 * input[3 * idx + 1]);
uchar[3 * idx + 2] = b = (unsigned char)(255 * input[3 * idx + 2]);
//convert to grayscale
grayImage[idx] = (unsigned char)(0.21*r + 0.71*g + 0.07*b);
}
}
__global__ void ComputeHistogram(unsigned char* grayImage, int* histogram, int size)
{
//set up shared memory
__shared__ int histogram_private[HISTOGRAM_LENGTH];
int tx = threadIdx.x;
int i = blockIdx.x*blockDim.x + tx;
if (tx < HISTOGRAM_LENGTH)
{
histogram_private[tx] = 0;
}
__syncthreads();
//compute private histogram
int stride = blockDim.x*gridDim.x;
while (i < size)
{
atomicAdd(&(histogram_private[grayImage[i]]), 1);
i += stride;
}
__syncthreads();
//add to the public histogram
if (tx < HISTOGRAM_LENGTH)
{
atomicAdd(&(histogram[tx]), histogram_private[tx]);
}
}
/*modified scan operation code from mp5*/
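/* single-block Brent-Kung style scan over the 256 histogram bins: each count is
first converted to a probability (count/size), the reduction and post-reduction
reverse phases then build the inclusive prefix sum, which is written out as the
CDF */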
__global__ void scan(int *histogram, float *cdf, int size)
{
__shared__ float partialSum[2*BLOCK_SIZE];
int tx = threadIdx.x;
//@@ Load a segment of the input vector into shared memory
//each thread loads 2 elements into shared memory. Put the identity (0 for sum) in if outside of boundary
if (tx < HISTOGRAM_LENGTH)
{
partialSum[tx] = float(float(histogram[tx]) / (size)); //compute probability
}
else
{
partialSum[tx] = 0;
partialSum[tx+BLOCK_SIZE] = 0;
}
__syncthreads();
//printf(" %d ", histogram[tx]);
/*reduction phase*/
for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2)
{
int index = (tx + 1)*stride * 2 - 1;
if (index < 2 * BLOCK_SIZE)
{
partialSum[index] += partialSum[index - stride];
}
__syncthreads();
}
/*post reduction reverse phase*/
for (int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2)
{
__syncthreads();
int index = (tx + 1)*stride * 2 - 1;
if (index + stride < 2 * BLOCK_SIZE)
{
partialSum[index + stride] += partialSum[index];
}
}
__syncthreads(); //partialSum now holds the cdf
//write to output
if (tx < HISTOGRAM_LENGTH)
{
cdf[tx] = partialSum[tx];
}
}
/*compute and apply the histogram equalization then cast back to float*/
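/* per pixel and per channel the correction is
clamp(255 * (cdf[v] - cdfmin) / (1 - cdfmin), 0, 255) with cdfmin = cdf[0];
the corrected unsigned char value is then rescaled back to [0,1] as a float */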
__global__ void histogram_equalization(unsigned char* uchar, float* cdf, float* output, int height, int width)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = blockIdx.x*blockDim.x + tx; //column index
int row = blockIdx.y*blockDim.y + ty; //row index
int idx = row*width + col; //compute current position --similar to first few mps
float cdfmin = cdf[0]; //extract cdf min
if (row < height && col < width)
{
//apply histogram equalization function
uchar[3 * idx] = min(max(255 * (cdf[uchar[3 * idx]] - cdfmin) / (1 - cdfmin), 0.0), 255.0);
uchar[3 * idx + 1] = min(max(255 * (cdf[uchar[3 * idx + 1]] - cdfmin) / (1 - cdfmin), 0.0), 255.0);
uchar[3 * idx + 2] = min(max(255 * (cdf[uchar[3 * idx + 2]] - cdfmin) / (1 - cdfmin), 0.0), 255.0);
//cast back to float
output[3 * idx] = float(uchar[3 * idx] / 255.0);
output[3 * idx + 1] = float(uchar[3 * idx + 1] / 255.0);
output[3 * idx + 2] = float(uchar[3 * idx + 2] / 255.0);
}
}
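/* Host pipeline (implemented in main below): copy the image to the device,
cast to unsigned char and convert to grayscale, build the histogram, scan it
into a CDF, apply the equalization, then copy the float result back. */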
int main(int argc, char **argv) {
wbArg_t args;
int imageWidth;
int imageHeight;
int imageChannels;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
const char *inputImageFile;
float* deviceInput;
float* deviceOutput;
unsigned char* deviceUchar;
unsigned char* deviceGrayImage;
int* deviceHistogram;
float* deviceCDF;
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
wbTime_start(Generic, "Importing data and creating memory on host");
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
hostInputImageData = wbImage_getData(inputImage); //added this
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
wbTime_stop(Generic, "Importing data and creating memory on host");
hostOutputImageData = (float *)malloc(imageWidth*imageHeight*imageChannels * sizeof(float));
wbCheck(cudaMalloc(&deviceInput, (imageWidth*imageHeight*imageChannels*sizeof(float))));
wbCheck(cudaMalloc(&deviceOutput, (imageWidth*imageHeight*imageChannels*sizeof(float))));
wbCheck(cudaMalloc(&deviceUchar, (imageWidth*imageHeight*imageChannels*sizeof(unsigned char))));
wbCheck(cudaMalloc(&deviceGrayImage, (imageWidth*imageHeight*sizeof(unsigned char))));
wbCheck(cudaMalloc(&deviceHistogram, (HISTOGRAM_LENGTH*sizeof(int))));
wbCheck(cudaMalloc(&deviceCDF, (HISTOGRAM_LENGTH*sizeof(float))));
wbCheck(cudaMemset(deviceHistogram, 0, HISTOGRAM_LENGTH * sizeof(int))); //initialize histogram to 0
wbCheck(cudaMemset(deviceCDF, 0, HISTOGRAM_LENGTH * sizeof(float)));
wbCheck(cudaMemcpy(deviceInput, hostInputImageData, (imageWidth*imageHeight*imageChannels*sizeof(float)), cudaMemcpyHostToDevice));
//use square root of 256 for number of threads in each direction
dim3 grid((imageWidth-1)/16 + 1, (imageHeight - 1) / 16 + 1, 1);
dim3 threads(16, 16, 1);
//cast to unsigned char and convert to grayscale
FloatToUnsignedChar<<<grid, threads>>>(deviceInput, deviceUchar, deviceGrayImage, imageHeight, imageWidth);
dim3 grid2((imageWidth*imageHeight - 1) / THREADS + 1, 1, 1);
dim3 threads2(THREADS, 1, 1);
//compute the histogram
ComputeHistogram<<<grid2, threads2>>>(deviceGrayImage, deviceHistogram, imageHeight*imageWidth);
dim3 grid3(1, 1, 1);
//compute the cdf
scan<<<grid3, threads2>>>(deviceHistogram, deviceCDF, imageHeight*imageWidth);
//printf("size: %d", imageHeight*imageWidth);
//find and apply the histogram equalization
histogram_equalization<<<grid, threads >>>(deviceUchar, deviceCDF, deviceOutput, imageHeight, imageWidth);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) { printf("CUDA error: %s\n", cudaGetErrorString(code)); }
wbCheck(cudaMemcpy(hostOutputImageData, deviceOutput, imageHeight*imageWidth*imageChannels*sizeof(float), cudaMemcpyDeviceToHost));
wbImage_setData(outputImage, hostOutputImageData); //added this
wbSolution(args, outputImage);
//@@ insert code here
//free memory
cudaFree(deviceInput);
cudaFree(deviceOutput);
cudaFree(deviceHistogram);
cudaFree(deviceCDF);
cudaFree(deviceUchar);
cudaFree(deviceGrayImage);
free(hostInputImageData);
free(hostOutputImageData);
return 0;
}
|
0f420e70fb1179f462228cfe01ba55327eb66b2e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
__device__
int lower_bound(int *a, int size_a, int x){
// returns the number of elements of a[] that are strictly less than x
// (rewritten as a half-open binary search so it always terminates,
// even when a[] contains values equal to x)
int lo = 0, hi = size_a;
while(lo < hi){
int mid = (lo + hi) / 2;
if(a[mid] < x) lo = mid + 1;
else hi = mid;
}
return lo;
}
__device__
int upper_bound(int *a, int size_a, int x){
if(a[size_a-1]<=x)return size_a;
if(x<a[0])return 0;
int ini=0, end=size_a-1;
int mid;
while(ini<end){
mid = (ini+end)/2;
if(a[mid]<=x && x<a[mid+1])return mid+1;
if(x>=a[mid])ini=mid+1;
else end=mid;
}
return size_a;
}
__global__
void parallel_merge(int *a, int *c, int l, int m , int r){
int pos;
int tid = threadIdx.x+blockDim.x*blockIdx.x;
int slide = blockDim.x*gridDim.x;
for(int i=l+tid;i<m;i+=slide){
pos = i + upper_bound(a+m, r-m, a[i])-l;
c[pos+l] = a[i];
}
for(int i=m+tid;i<r;i+=slide){
pos = i + lower_bound(a+l, m-l, a[i])-m;
c[pos+l] = a[i];
}
}
void merge_sort(int* arr, int n){
int *arr_device, *c_device, *tmp_device;
hipMalloc(&c_device, n*sizeof(int));
hipMalloc(&arr_device, n*sizeof(int));
hipMemcpy(arr_device, arr, n*sizeof(int), hipMemcpyHostToDevice);
int deviceId, warg_size;
hipGetDevice(&deviceId);
hipDeviceProp_t props;
hipGetDeviceProperties(&props, deviceId);
warg_size = props.multiProcessorCount;
int num_threads = 128, num_blocks = 4*warg_size;
for(int size=1;size<=n-1;size = 2*size){
for(int l=0;l<n-1;l+=2*size){
hipStream_t stream;
hipStreamCreate(&stream);
int m = min(l+size-1, n-1);
int r = min(l+2*size-1, n-1);
hipLaunchKernelGGL(( parallel_merge), dim3(num_blocks),dim3(num_threads), 0, stream, arr_device, c_device, l, m+1, r+1);
hipStreamDestroy(stream); //release the stream; destruction is deferred until its work completes
}
hipDeviceSynchronize();
tmp_device = arr_device;
arr_device = c_device;
c_device = tmp_device;
}
hipDeviceSynchronize();
hipMemcpy(arr, arr_device, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(arr_device);
hipFree(c_device);
}
int main(int argc,char** argv){
int *v;
int n;
if(argc>1)n = 1<<atoi(argv[1]);
else return 0;
v = (int*)malloc(n*sizeof(int));
for(int i=0;i<n;i++)v[i]=rand()%50;
merge_sort(v, n);
for(int i=0;i<n;i++){
printf("%d ", v[i]);
}
printf("\n");
free(v);
return 0;
}
|
0f420e70fb1179f462228cfe01ba55327eb66b2e.cu
|
#include<stdio.h>
#include<stdlib.h>
__device__
int lower_bound(int *a, int size_a, int x){
// returns the number of elements of a[] that are strictly less than x
// (rewritten as a half-open binary search so it always terminates,
// even when a[] contains values equal to x)
int lo = 0, hi = size_a;
while(lo < hi){
int mid = (lo + hi) / 2;
if(a[mid] < x) lo = mid + 1;
else hi = mid;
}
return lo;
}
__device__
int upper_bound(int *a, int size_a, int x){
if(a[size_a-1]<=x)return size_a;
if(x<a[0])return 0;
int ini=0, end=size_a-1;
int mid;
while(ini<end){
mid = (ini+end)/2;
if(a[mid]<=x && x<a[mid+1])return mid+1;
if(x>=a[mid])ini=mid+1;
else end=mid;
}
return size_a;
}
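/* lower_bound/upper_bound give the rank of a value in a sorted array:
lower_bound counts elements strictly less than x, upper_bound counts elements
less than or equal to x. Example: for a = {1, 3, 3, 5}, lower_bound(a,4,3) = 1
and upper_bound(a,4,3) = 3. parallel_merge below uses them to compute, for each
element of one half, how many elements of the other half precede it in the
merged output. Worked example with left = {2, 7}, right = {3, 5}:
2 -> 0 + upper_bound(right,2,2)=0 -> slot 0; 7 -> 1 + 2 -> slot 3;
3 -> 0 + lower_bound(left,2,3)=1 -> slot 1; 5 -> 1 + 1 -> slot 2. */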
__global__
void parallel_merge(int *a, int *c, int l, int m , int r){
int pos;
int tid = threadIdx.x+blockDim.x*blockIdx.x;
int slide = blockDim.x*gridDim.x;
for(int i=l+tid;i<m;i+=slide){
pos = i + upper_bound(a+m, r-m, a[i])-l;
c[pos+l] = a[i];
}
for(int i=m+tid;i<r;i+=slide){
pos = i + lower_bound(a+l, m-l, a[i])-m;
c[pos+l] = a[i];
}
}
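/* merge_sort below is a bottom-up merge sort: the chunk size doubles every
pass, each pair of adjacent sorted chunks is merged by one kernel launch (on
its own stream so independent chunk merges can overlap), and the input/output
buffers are swapped after every pass, so the final result is copied back from
arr_device. */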
void merge_sort(int* arr, int n){
int *arr_device, *c_device, *tmp_device;
cudaMalloc(&c_device, n*sizeof(int));
cudaMalloc(&arr_device, n*sizeof(int));
cudaMemcpy(arr_device, arr, n*sizeof(int), cudaMemcpyHostToDevice);
int deviceId, warg_size;
cudaGetDevice(&deviceId);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, deviceId);
warg_size = props.multiProcessorCount;
int num_threads = 128, num_blocks = 4*warg_size;
for(int size=1;size<=n-1;size = 2*size){
for(int l=0;l<n-1;l+=2*size){
cudaStream_t stream;
cudaStreamCreate(&stream);
int m = min(l+size-1, n-1);
int r = min(l+2*size-1, n-1);
parallel_merge<<<num_blocks,num_threads, 0, stream>>>(arr_device, c_device, l, m+1, r+1);
cudaStreamDestroy(stream); //release the stream; destruction is deferred until its work completes
}
cudaDeviceSynchronize();
tmp_device = arr_device;
arr_device = c_device;
c_device = tmp_device;
}
cudaDeviceSynchronize();
cudaMemcpy(arr, arr_device, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(arr_device);
cudaFree(c_device);
}
int main(int argc,char** argv){
int *v;
int n;
if(argc>1)n = 1<<atoi(argv[1]);
else return 0;
v = (int*)malloc(n*sizeof(int));
for(int i=0;i<n;i++)v[i]=rand()%50;
merge_sort(v, n);
for(int i=0;i<n;i++){
printf("%d ", v[i]);
}
printf("\n");
free(v);
return 0;
}
|
130e2a7ca41da9e8a12d4eb90da453a2e9e57ed9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include <string.h>
#include "stdlib.h"
#include<math.h>
#include"time.h"
#include"sys/time.h"
#include<cuda.h>
#include<thrust/device_vector.h>
#include<thrust/host_vector.h>
#include<thrust/sort.h>
#include<thrust/copy.h>
#include<mpi.h>
#include"canny_edge.h"
/*
struct location {
int locationI;
int locationJ;
};
int compare(const void *a, const void *b){
float c = *(float*)a;
float d = *(float*)b;
if(c < d) return -1;
else return 1;
}
void print_matrix(float *matrix, int height, int width){
for(int i=0; i<height; i++){
for(int j=0; j<width; j++){
printf("%.3f ", *(matrix+(i*width)+j));
}
printf("\n");
}
}
int range(int y, int x, int h, int w){
if(x < 0 || x >= w){
return 0;
}
else if(y < 0 || y >= h){
return 0;
}
else
return 1;
}
*/
float *ghost_chunk(float *image, int height, int width, int a, int comm_size, int comm_rank){
MPI_Status status;
float *output;
if(comm_rank == 0 || comm_rank == (comm_size-1))
// output = (float*)malloc(sizeof(float)*width*(height + a));
hipMalloc((void **)&output,sizeof(float)*width*(height + a));
else
hipMalloc((void **)&output,sizeof(float)*width*(height + 2*a));
// output = (float*)malloc(sizeof(float)*width*(height + 2*a));
if(comm_rank == 0){
MPI_Sendrecv(image+width*(height-a), a*width, MPI_FLOAT, comm_rank+1, comm_rank,
output+width*height, a*width, MPI_FLOAT, comm_rank+1, comm_rank+1, MPI_COMM_WORLD, &status);
hipMemcpy(output, image, sizeof(float)*width*height, hipMemcpyDeviceToDevice); //both buffers live on the device
return output;
}else if(comm_rank == (comm_size-1)){
MPI_Sendrecv(image, a*width, MPI_FLOAT, comm_rank-1, comm_rank,
output, a*width, MPI_FLOAT, comm_rank-1, comm_rank-1, MPI_COMM_WORLD, &status);
hipMemcpy(output+a*width, image, sizeof(float)*width*height, hipMemcpyDeviceToDevice); //both buffers live on the device
return output+a*width;
}else{
//send top data to previous rank, receive top data from previous rank
MPI_Sendrecv(image, a*width, MPI_FLOAT, comm_rank-1, comm_rank,
output, a*width, MPI_FLOAT, comm_rank-1, comm_rank-1, MPI_COMM_WORLD, &status);
//send bottom data to next rank, receive top data from next rank
MPI_Sendrecv(image+width*(height-a), a*width, MPI_FLOAT, comm_rank+1,comm_rank,
output+width*(height+a), a*width, MPI_FLOAT, comm_rank+1, comm_rank+1, MPI_COMM_WORLD, &status);
hipMemcpy(output+a*width, image, sizeof(float)*width*height, hipMemcpyDeviceToDevice); //both buffers live on the device
return output+a*width;
}
}
__global__
void convolveGPU(float *image, float *outputG, float *kernel, int height,int width, int k_height, int k_width, int top, int bottom){
//using global memory
int i,j,m,offseti,offsetj;
float kerw=(k_width>k_height)?k_width:k_height;
//printf("%f",kerw);
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height && j<width ){
float sum = 0;
for( m=0; m<kerw; m++){
offseti = k_height>1?(-1*(k_height/2)+m):0;
offsetj = k_width>1?(-1*(k_width/2)+m):0;
if( (i+offseti)>=0-top && (i+offseti)<height+bottom && (j+offsetj)>=0 && (j+offsetj)< width)
sum+= image[(i+offseti)*width+(j+offsetj)]*kernel[m];
}
outputG[i*width+j]=sum;
}
/*
//using shared memory
int m,offseti,offsetj;
float kerw=(k_width>k_height)?k_width:k_height;
//printf("%f",kerw);
int locaIx = threadIdx.x;
int locaIy = threadIdx.y;
int globIx = blockIdx.x*blockDim.x+ threadIdx.x;
int globIy = blockIdx.y*blockDim.y+ threadIdx.y;
//read global memory to shared memory
extern __shared__ float AShared[];
AShared[locaIx*blockDim.y+locaIy]=image[globIx*width+globIy];
__syncthreads();
if(globIx<height && globIy<width ){
float sum = 0;
for( m=0; m<kerw; m++){
offseti = k_height>1?(-1*(k_height/2)+m):0;
offsetj = k_width>1?(-1*(k_width/2)+m):0;
if( (locaIx+offseti)>=0 && (locaIx+offseti)< blockDim.x && (locaIy+offsetj)>=0 && (locaIy+offsetj)<blockDim.y)
sum+= AShared[(locaIx+offseti)*blockDim.y+(locaIy+offsetj)]*kernel[m];
else if((globIx+offseti) >= 0 && (globIx+offseti) < height && (globIy+offsetj)>=0 && (globIy+offsetj)<width)
sum+= image[(globIx+offseti)*width+(globIy+offsetj)]*kernel[m];
}
output[globIx*width+globIy]=sum;
}
*/
}
void convolve(float *chunk_image,float *temp_horizon,float *gauss_Kernel,float *dgauss_Kernel,int height,int width, int k_height, int k_width ,int comm_size,int comm_rank,int blocksize){
/*
void convolve(float *image, float *d_temp_horizon, float *kernel, int height,
int width, int k_height, int k_width, int comm_size, int comm_rank, int blocksize){
*/
int w = k_height>1?k_height:k_width;
int a = floor(w/2);
float *d_temp_horizon,*d_temp_horizon_S,*d_chunk_image,*d_GKernel, *d_DKernel, *chunk_image_ghost, *chunk_image_ghost_S;
if(comm_rank == 0 || comm_rank == (comm_size-1))
hipMalloc((void **)&d_chunk_image,sizeof(float)*width*(height+a));
else
hipMalloc((void **)&d_chunk_image,sizeof(float)*width*(height+2*a));
hipMalloc((void **)&d_DKernel,sizeof(float)*w);
hipMalloc((void **)&d_temp_horizon,sizeof(float)*width*height);
hipMalloc((void **)&d_GKernel,sizeof(float)*w);
hipMalloc((void **)&d_temp_horizon_S,sizeof(float)*width*height);
if(comm_rank == 0 || comm_rank == (comm_size-1))
hipMemcpy(d_chunk_image,chunk_image,sizeof(float)*width*(height+a),hipMemcpyHostToDevice);
else
hipMemcpy(d_chunk_image,chunk_image,sizeof(float)*width*(height+2*a),hipMemcpyHostToDevice);
hipMemcpy(d_GKernel,gauss_Kernel,sizeof(float)*w,hipMemcpyHostToDevice);
hipMemcpy(d_DKernel,dgauss_Kernel,sizeof(float)*w,hipMemcpyHostToDevice);
hipDeviceSynchronize();
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
int top, bottom;
if (comm_rank == 0){
top = 0;
bottom = floor(k_height/2);
}else if (comm_rank == comm_size-1){
top = floor(k_height/2);
bottom = 0;
}else{
top = floor(k_height/2);
bottom = top;
}
chunk_image_ghost = ghost_chunk(d_chunk_image, height, width, a ,comm_size, comm_rank);
// convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(chunk_image_ghost, d_temp_horizon, d_GKernel, height, width, k_height, k_width, top, bottom);
// chunk_image_ghost_S = ghost_chunk(d_temp_horizon, height, width, a ,comm_size, comm_rank);
// convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(chunk_image_ghost_S, d_temp_horizon_S, d_DKernel, height, width, k_width,k_height, top, bottom);
hipMemcpy(temp_horizon,d_temp_horizon_S,sizeof(float)*width*height,hipMemcpyDeviceToHost);
}
__global__
void MagnitudeGPU(float *vertical, float *horizon, float *Mag, int height, int width){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height && j<width)
Mag[i*width+j]=sqrt(pow(vertical[i*width+j],2)+pow(horizon[i*width+j],2));
}
void Magnitude(float *vertical, float *horizon,float *d_Mag, int height, int width, int blocksize){
float *d_vertical,*d_horizon;
hipMalloc((void **)&d_Mag,sizeof(float)*width*height);
hipMalloc((void **)&d_vertical,sizeof(float)*width*height);
hipMalloc((void **)&d_horizon,sizeof(float)*width*height);
hipMemcpy(d_vertical,vertical,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipMemcpy(d_horizon,horizon,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipDeviceSynchronize();
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
hipLaunchKernelGGL(( MagnitudeGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_vertical, d_horizon, d_Mag, height, width);
hipDeviceSynchronize();
//hipMemcpy(Mag,d_Mag,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(d_Mag);
hipFree(d_vertical);
hipFree(d_horizon);
}
__global__
void DirectionGPU(float *vertical, float *horizon, float *Dir, int height,int width){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height&&j<width)
Dir[i*width+j]=atan2(vertical[i*width+j],horizon[i*width+j]);
}
void Direction(float *vertical, float *horizon, float *d_Dir, int height,int width, int blocksize){
float *d_vertical,*d_horizon;
hipMalloc((void **)&d_Dir,sizeof(float)*width*height);
hipMalloc((void **)&d_vertical,sizeof(float)*width*height);
hipMalloc((void **)&d_horizon,sizeof(float)*width*height);
hipMemcpy(d_vertical,vertical,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipMemcpy(d_horizon,horizon,sizeof(float)*width*height,hipMemcpyHostToDevice);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
hipLaunchKernelGGL(( DirectionGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_vertical, d_horizon, d_Dir, height, width);
//hipMemcpy(Dir,d_Dir,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipFree(d_Dir);
hipFree(d_vertical);
hipFree(d_horizon);
}
__global__
void supressionGPU(float *sup, float *Mag, float *Dir, int width, int height, int top, int bottom){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height+bottom &&j<width){
float angle = Dir[i*width+j];
if(angle<0) angle = angle + M_PI;
angle=(180/M_PI)*angle;
// top and bottom
if(angle > 157.5 || angle <= 22.5){
if (i-1 >= 0-top && i+1 < height+bottom) {
if (Mag[(i-1)*width+j]>Mag[i*width+j] || Mag[(i+1)*width+j]>Mag[i*width+j])
sup[i*width+j]=0;
}
}
// top left and bottom right
else if (angle>22.5 && angle<=67.5) {
if ( (i-1) >= 0-top && (j-1) >= 0){
if (Mag[(i-1)*width+(j-1)] > Mag[i*width+j]){
sup[i*width+j]=0;
}else if((i+1<height+bottom && j+1 <width)){
if(Mag[(i+1)*width+(j+1)]>Mag[i*width+j])
sup[i*width+j]=0;
}}}
//left and right
else if(angle>67.5 && angle<=112.5){
if (j-1 >= 0 && j+1 < width) {
if (Mag[i*width+(j-1)]>Mag[i*width+j] || Mag[i*width+(j+1)]>Mag[i*width+j]) {
sup[i*width+j]=0;
}
}
}
// left bottom and right top
else if(angle>112.5 && angle<=157.5){
if ((j-1 >= 0 && i-1 >= 0-top ) &&(i+1 < height+bottom && j+1 < width)) {
if (Mag[(i+1)*width+(j-1)]>Mag[i*width+j] || Mag[(i-1)*width+(j+1)]>Mag[i*width+j]) {
sup[i*width+j]=0;
}
}
}
}
}
void supression(float *d_sup, float *Mag, float *Dir, int width, int height, int comm_rank, int comm_size, int blocksize, int a){
float *d_Mag,*d_Dir;
hipMalloc((void **)&d_Dir,sizeof(float)*width*height);
hipMalloc((void **)&d_sup,sizeof(float)*width*height);
if(comm_rank == 0 || comm_rank == (comm_size-1))
hipMalloc((void **)&d_Mag,sizeof(float)*width*(height+a));
else
hipMalloc((void **)&d_Mag,sizeof(float)*width*(height+2*a));
hipMemcpy(d_Dir,Dir,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipMemcpy(d_sup,Mag,sizeof(float)*width*height,hipMemcpyHostToDevice);
if(comm_rank == 0 || comm_rank == (comm_size-1))
hipMemcpy(d_Mag,Mag,sizeof(float)*width*(height+a),hipMemcpyHostToDevice);
else
hipMemcpy(d_Mag,Mag,sizeof(float)*width*(height+2*a),hipMemcpyHostToDevice);
int top, bottom;
if (comm_rank == 0){
top = 0;
bottom = 1;
}else if (comm_rank == comm_size-1){
top = 1;
bottom = 0;
}else{
top = 1;
bottom = 1;
}
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
hipLaunchKernelGGL(( supressionGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_sup, d_Mag, d_Dir, height, width, top, bottom);
// hipMemcpy(sup,d_sup,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipFree(d_Dir);
hipFree(d_Mag);
hipFree(d_sup);
}
__global__
void hysteresisGPU(float *sup, float *hys, int height, int width, float t_high, float t_low){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height && j <width){
if(sup[i*width+j]>=t_high)
hys[i*width+j]=255;
else if(sup[i*width+j]<=t_low)
hys[i*width+j]=0;
else if(sup[i*width+j]<t_high && sup[i*width+j]>t_low)
hys[i*width+j]=125;
}
}
void hysteresis(float *sup, float *d_hys, int height, int width, float t_high, float t_low, int blocksize){
float *d_sup;
hipMalloc((void **)&d_sup,sizeof(float)*width*height);
hipMalloc((void **)&d_hys,sizeof(float)*width*height);
hipMemcpy(d_sup,sup,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipMemcpy(d_hys,sup,sizeof(float)*width*height,hipMemcpyHostToDevice);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
hipLaunchKernelGGL(( hysteresisGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_sup, d_hys, height, width, t_high, t_low);
// hipMemcpy(hys,d_hys,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipFree(d_hys);
hipFree(d_sup);
}
__global__
void FinaledgeGPU(float *edge, float *hys, int height, int width, int top, int bottom ){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
//edge[i*width+j]=hys[i*width+j];
for (int y=-1; y<=1; y++){
for (int x=-1; x<=1; x++){
if(i+y<height+bottom && i+y>0-top && j+x<width && j+x> 0){
if (hys[(i+y)*width+x+j]==255)
edge[i*width+j]=255;
else
edge[i*width+j]=0;
}
}
}
}
void Finaledge(float *d_edge, float *hys, int height, int width, int comm_size, int comm_rank, int blocksize){
int top, bottom;
if (comm_rank == 0){
top = 0;
bottom = 1;
}else if (comm_rank == comm_size-1){
top = 1;
bottom = 0;
}else{
top = 1;
bottom = 1;
}
float *d_hys;
hipMalloc((void **)&d_edge,sizeof(float)*width*height);
hipMalloc((void **)&d_hys,sizeof(float)*width*height);
hipMemcpy(d_edge,hys,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipMemcpy(d_hys,hys,sizeof(float)*width*height,hipMemcpyHostToDevice);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
hipLaunchKernelGGL(( FinaledgeGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_edge, d_hys, height, width, top, bottom);
// hipMemcpy(edge,d_edge,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipFree(d_edge);
hipFree(d_hys);
}
__global__
void feature_detecGPU(float *d_cornerness,int height, int width, float *C_ver, float *C_hor, int blocksize, int top, int bottom){
//float k = 0.04;
int window_width = 7;
//float *cornerness = (float*)malloc(sizeof(float)*height*width);
//float *C_hor = (float*)malloc(sizeof(float)*height*width);
//float *C_ver = (float*)malloc(sizeof(float)*height*width);
int locaIx = threadIdx.x;
int locaIy = threadIdx.y;
int globIx = threadIdx.x+blockIdx.x*blockDim.x;
int globIy = threadIdx.y+blockIdx.y*blockDim.y;
float Ixx,Iyy,IxIy;
extern __shared__ float Ashared[];
__shared__ float *Vshared, *Hshared;
Vshared = Ashared;
Hshared = Ashared+blocksize*blocksize;
Vshared[locaIx*blockDim.y+locaIy] = C_ver[globIx*width+globIy];
Hshared[locaIx*blockDim.y+locaIy] = C_hor[globIx*width+globIy];
__syncthreads();
Ixx = 0;
Iyy = 0;
IxIy = 0;
if(globIx <height+bottom && globIy < width){
for(int k = -window_width/2; k < window_width/2 ; k++){
for(int m = -window_width/2; m < window_width/2 ; m++){
if(locaIx+k >= 0 && locaIx+k < blockDim.x && locaIy+m >= 0 && locaIy+m < blockDim.y){
int offseti = locaIx+k;
int offsetj = locaIy+m;
Ixx = Ixx + pow(Vshared[offseti*blockDim.y+offsetj],2);
Iyy = Iyy + pow(Hshared[offseti*blockDim.y+offsetj],2);
IxIy = IxIy + Vshared[offseti*blockDim.y+offsetj] * Hshared[offseti*blockDim.y+offsetj];
}
else if(globIx+k >= 0-top && globIx+k < height+bottom && globIy+m >= 0 && globIy+m < width){
int offseti = globIx+k;
int offsetj = globIy+m;
Ixx = Ixx + pow(C_ver[offseti*width+offsetj],2);
Iyy = Iyy + pow(C_hor[offseti*width+offsetj],2);
IxIy = IxIy + C_ver[offseti*width+offsetj] * C_hor[offseti*width+offsetj];
}
}
}
__syncthreads();
d_cornerness[globIx*width+globIy]= (Ixx*Iyy) - (IxIy*IxIy) - 0.04*(Ixx+Iyy)*(Ixx+Iyy);
printf("test");
}
}
void feature_detec(float *feature, int height, int width, float *vertical, float *horizon, int comm_size, int comm_rank, int blocksize, int a){
float *d_ver,*d_hor,*d_feature;
int top, bottom;
if (comm_rank == 0){
top = 0;
bottom = 1;
}else if (comm_rank == comm_size-1){
top = 1;
bottom = 0;
}else{
top = 1;
bottom = 1;
}
hipMalloc((void **)&d_feature,sizeof(float)*width*height);
if(comm_rank == 0 || comm_rank == (comm_size-1)){
hipMalloc((void **)&d_ver,sizeof(float)*width*(height+a));
hipMalloc((void **)&d_hor,sizeof(float)*width*(height+a));
}else{
hipMalloc((void **)&d_ver,sizeof(float)*width*(height+2*a));
hipMalloc((void **)&d_hor,sizeof(float)*width*(height+2*a));
}
if(comm_rank == 0 || comm_rank == (comm_size-1)){
hipMemcpy(d_ver,vertical,sizeof(float)*width*(height+a),hipMemcpyHostToDevice);
hipMemcpy(d_hor,horizon,sizeof(float)*width*(height+a),hipMemcpyHostToDevice);
}else{
hipMemcpy(d_ver,vertical,sizeof(float)*width*(height+2*a),hipMemcpyHostToDevice);
hipMemcpy(d_hor,horizon,sizeof(float)*width*(height+2*a),hipMemcpyHostToDevice);
}
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
hipLaunchKernelGGL(( feature_detecGPU), dim3(dimGrid),dim3(dimBlock), 2*sizeof(float)*blocksize*blocksize, 0, d_feature, height, width, d_ver, d_hor, blocksize, top , bottom); //the kernel needs 2*blocksize*blocksize floats of dynamic shared memory
hipMemcpy(feature,d_feature,sizeof(float)*width*height,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(d_ver);
hipFree(d_hor);
hipFree(d_feature);
}
__global__
void find_featureGPU(float *output, float *d_cornerness,int height, int width, int blocksize){
printf("here\n");
//int locatI,locatJ;
int stride,localIndex;
// struct location *loc;
int window_size = blockDim.x;
//int window_width = blockDim.y;
int locaIx = threadIdx.x;
int locaIy = threadIdx.y;
//int Index_locaIx = blocksize*blocksize+threadIdx.x;
//int Index_locaIy = blocksize*blocksize+threadIdx.y;
int globIx = threadIdx.x+blockIdx.x*blockDim.x;
int globIy = threadIdx.y+blockIdx.y*blockDim.y;
extern __shared__ float Shared[];
__shared__ float *AShared,*indexShared;
AShared = Shared;
indexShared = Shared+blocksize*blocksize;
//float kerw=(k_width>k_height)?k_width:k_height;
AShared[locaIx*blockDim.y+locaIy] = d_cornerness[globIx*width+globIy];
indexShared[locaIx*blockDim.y+locaIy] = globIx*width+globIy;
//int a = indexShared[locaIx*blockDim.y+locaIy];
//printf("a; %d ",a);
__syncthreads();
// loc = (struct location*)malloc(sizeof(struct location)*window_heigh*window_width);
// int locount = 0;
//printf("%d ",AShared+locaIx*blockDim.y+locaIy);
// if(globIx < height && globIy <width){
for (stride = ((window_size*window_size)/2);stride >= 1; stride/=2){
__syncthreads();
localIndex = locaIx*blockDim.y+locaIy;
if(localIndex < stride){
if(AShared[localIndex]<AShared[localIndex+stride]){
AShared[localIndex]=AShared[localIndex+stride];
indexShared[localIndex]=indexShared[localIndex+stride];
// }else if(AShared[localIndex]<AShared[localIndex+stride])
// AShared[localIndex]=AShared[localIndex+stride];
// indexShared[localIndex]=localIndex+stride ;
}
}
}
if(locaIx == 0 && locaIy == 0){
output[globIx*width+globIy]=indexShared[0];
int a = indexShared[0];
printf("%d",a);
}
}
void find_feature(float *output,float *temp_feature,int height,int width,int comm_size, int comm_rank,int blocksize){
float *d_temp_feature, *d_output;
hipMalloc((void **)&d_output,sizeof(float)*width*height);
hipMalloc((void **)&d_temp_feature,sizeof(float)*width*height);
hipMemcpy(d_temp_feature,temp_feature,sizeof(float)*width*height,hipMemcpyHostToDevice);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
hipLaunchKernelGGL(( find_featureGPU), dim3(dimGrid),dim3(dimBlock),2*sizeof(float)*blocksize*blocksize, 0, d_output, d_temp_feature, height, width,blocksize);
hipMemcpy(output,d_output,sizeof(float)*width*height,hipMemcpyDeviceToHost);
//hipFree(d_output);
// hipFree(d_temp_feature);
}
/*
int main(int argc, char ** argv){
int blocksize = atof(argv[3]);
int height,width, w;
float *image,sigma, a;
float *gauss_Kernel,*dgauss_Kernel;
//float *horizon;
// float *vertical;
float *Mag,*sup,*Dir,*hys;
float *edge;
//create pointers for GPU
hipSetDevice(0); //use GPU 0
float *d_image;
float *d_gauss_Kernel,*d_dgauss_Kernel;
float *d_temp_horizon,*d_horizon;
float *d_vertical, *d_temp_vertical;
float *d_Mag,*d_Dir,*d_sup, *d_hys, *d_edge;
float *d_cornerness,*cornerness,*d_features,*features;
struct timeval fileIStart,fileIEnd,fileOStart,fileOEnd,k1Start,k1End,k2Start,k2End,k3Start,k3End,k4Start,k4End,k5Start,k5End,k6Start,k6End,k7Start,k7End,k8Start,k8End,k9Start,k9End,H2DStart,H2DEnd,D2HStart,D2HEnd,start, end, computationstart,computationend;
gettimeofday(&start, NULL);
//for file input timer
gettimeofday(&fileIStart,NULL);
read_image_template(argv[1],&image,&width,&height);
gettimeofday(&fileIEnd, NULL);
gettimeofday(&computationstart, NULL);
sigma = atof(argv[2]);
a = round(2.5*sigma-0.5);
w = 2*a+1;
//printf("a:%f w:%d sigma: %f \n",a,w,sigma);
//Malloc for CPU
gauss_Kernel=(float*)malloc(sizeof(float)*w);
dgauss_Kernel=(float*)malloc(sizeof(float)*w);
//temp_horizon=(float *)malloc(sizeof(float)*width*height);
//horizon=(float *)malloc(sizeof(float)*width*height);
//temp_vertical=(float *)malloc(sizeof(float)*width*height);
//vertical=(float *)malloc(sizeof(float)*width*height);
//Mag=(float *)malloc(sizeof(float)*width*height);
//Dir=(float *)malloc(sizeof(float)*width*height);
sup=(float *)malloc(sizeof(float)*width*height);
//hys=(float *)malloc(sizeof(float)*width*height);
edge=(float *)malloc(sizeof(float)*width*height);
cornerness = (float*)malloc(sizeof(float)*height*width);
features = (float*)malloc(sizeof(float)*height*width);
//Malloc for GPU
hipMalloc((void **)&d_image,sizeof(float)*width*height);
hipMalloc((void **)&d_gauss_Kernel,sizeof(float)*w);
hipMalloc((void **)&d_dgauss_Kernel,sizeof(float)*w);
hipMalloc((void **)&d_temp_horizon,sizeof(float)*width*height);
hipMalloc((void **)&d_horizon,sizeof(float)*width*height);
hipMalloc((void **)&d_temp_vertical,sizeof(float)*width*height);
hipMalloc((void **)&d_vertical,sizeof(float)*width*height);
hipMalloc((void **)&d_Mag,sizeof(float)*width*height);
hipMalloc((void **)&d_Dir,sizeof(float)*width*height);
hipMalloc((void **)&d_sup,sizeof(float)*width*height);
hipMalloc((void **)&d_hys,sizeof(float)*width*height);
hipMalloc((void **)&d_edge,sizeof(float)*width*height);
hipMalloc((void **)&d_cornerness,sizeof(float)*width*height);
hipMalloc((void **)&d_features,sizeof(float)*width*height);
Cal_gauss_kernel(gauss_Kernel,sigma,a, w, dgauss_Kernel);
//printf("Gaussian Kernel:\n");
//print_matrix(gauss_Kernel, 1, w);
//printf("Derivative Kernel:\n");
//print_matrix(dgauss_Kernel,1,w);
//copy data from CPU to GPU
gettimeofday(&H2DStart, NULL);
hipMemcpy(d_image,image,sizeof(float)*width*height,hipMemcpyHostToDevice);
hipMemcpy(d_gauss_Kernel,gauss_Kernel,sizeof(float)*w,hipMemcpyHostToDevice);
hipMemcpy(d_dgauss_Kernel,dgauss_Kernel,sizeof(float)*w,hipMemcpyHostToDevice);
hipDeviceSynchronize();
gettimeofday(&H2DEnd, NULL);
//Horizontal gradient
//int blocksize = atof(argv[3]);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
gettimeofday(&k1Start, NULL);
convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(d_image, d_temp_horizon, d_gauss_Kernel, height, width, w, 1 );
convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(d_temp_horizon, d_horizon, d_dgauss_Kernel, height, width, 1, w);
hipDeviceSynchronize();
gettimeofday(&k1End, NULL);
//convolve(image, &temp_horizon, gauss_Kernel, height, width, w, 1 );
//convolve(temp_horizon, &horizon, dgauss_Kernel, height, width, 1, w);
//hipMemcpy(horizon,d_horizon,sizeof(float)*width*height,hipMemcpyDeviceToHost);
//Vertical gradient
//convolve(image, &temp_vertical, gauss_Kernel, height, width,1, k_w);
//convolve(temp_vertical, &vertical, dgauss_Kernel, height, width, k_w, 1);
gettimeofday(&k2Start, NULL);
convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(d_image, d_temp_vertical, d_gauss_Kernel, height, width, 1,w );
convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(d_temp_horizon, d_vertical, d_dgauss_Kernel, height, width, w,1);
hipDeviceSynchronize();
gettimeofday(&k2End, NULL);
//hipMemcpy(vertical, d_vertical,sizeof(float)*width*height,hipMemcpyDeviceToHost);
// Magnitude
gettimeofday(&k3Start, NULL);
//Magnitude(vertical, horizon, &Mag, height, width);
MagnitudeGPU<<<dimGrid,dimBlock>>>(d_vertical, d_horizon, d_Mag, height, width);
hipDeviceSynchronize();
gettimeofday(&k3End, NULL);
//hipMemcpy(Mag, d_Mag, sizeof(float)*width*height,hipMemcpyDeviceToHost);
// Direction
gettimeofday(&k4Start, NULL);
//Direction(vertical, horizon, &Dir, height, width);
DirectionGPU<<<dimGrid,dimBlock>>>(d_vertical, d_horizon, d_Dir, height, width);
hipDeviceSynchronize();
gettimeofday(&k4End, NULL);
//hipMemcpy(Dir, d_Dir, sizeof(float)*width*height,hipMemcpyDeviceToHost);
// supression
//supression (&sup, Mag, Dir, height, width);
gettimeofday(&k5Start, NULL);
supressionGPU<<<dimGrid,dimBlock>>>(d_sup, d_Mag, d_Dir, height, width);
hipDeviceSynchronize();
gettimeofday(&k5End, NULL);
//hipMemcpy(sup, d_sup, sizeof(float)*width*height,hipMemcpyDeviceToHost);
// hysteresis
thrust::device_ptr<float>thr_d(d_sup);
thrust::device_vector<float>d_sup_vec(thr_d,thr_d+(height*width));
thrust::sort(d_sup_vec.begin(),d_sup_vec.end());
int index = (int)((0.9)*height*width);
float t_high = d_sup_vec[index];
float t_low = t_high/5;
gettimeofday(&k6Start, NULL);
hysteresisGPU<<<dimGrid,dimBlock>>>(d_sup, d_hys, height, width, t_high, t_low);
hipDeviceSynchronize();
//hipMemcpy(hys,d_hys,sizeof(float)*width*height,hipMemcpyDeviceToHost);
gettimeofday(&k6End, NULL);
// Finaledge
gettimeofday(&k7Start, NULL);
FinaledgeGPU<<<dimGrid,dimBlock>>>(d_edge, d_hys, height, width);
hipDeviceSynchronize();
gettimeofday(&k7End, NULL);
gettimeofday(&D2HStart, NULL);
hipMemcpy(edge,d_edge,sizeof(float)*width*height,hipMemcpyDeviceToHost);
gettimeofday(&D2HEnd, NULL);
// feature
gettimeofday(&k8Start, NULL);
feature_detecGPU<<<dimGrid,dimBlock,2*sizeof(float)*blocksize*blocksize>>>(d_cornerness,height, width, d_vertical, d_horizon, blocksize);
hipDeviceSynchronize();
gettimeofday(&k8End, NULL);
//hipMemcpy(cornerness,d_cornerness,sizeof(float)*width*height,hipMemcpyDeviceToHost);
// gettimeofday(&k9Start, NULL);
gettimeofday(&k9Start, NULL);
find_featureGPU<<<dimGrid,dimBlock,2*sizeof(float)*blocksize*blocksize>>>(d_features,d_cornerness,height, width,blocksize);
hipDeviceSynchronize();
gettimeofday(&k9End, NULL);
hipMemcpy(features,d_features,sizeof(float)*width*height,hipMemcpyDeviceToHost);
gettimeofday(&computationend, NULL);
// FILE *T0;
// T0=fopen("index.csv","w+");
int location_I, location_J;
for(int i = 0; i<width*height; i++){
if (features[i]>0){
int a = *(features+i);
location_I = a/width;
location_J = a%width;
printf("Index:%d, I:%d, J:%d\n",a,location_I,location_J);
}
}
// fclose(T0);
//output image
//write_image_template("h_convolve.pgm",horizon, width, height);
//write_image_template("v_convolve.pgm",vertical, width, height);
//write_image_template("Magnitude.pgm",Mag, width, height);
//write_image_template("Direction.pgm",Dir, width, height);
//write_image_template("suppress.pgm", sup, width, height);
//write_image_template("hysteresis.pgm", hys, width, height);
gettimeofday(&fileOStart,NULL);
write_image_template("edge.pgm", edge, width, height);
gettimeofday(&fileOEnd, NULL);
//write_image_template("cornerness.pgm", cornerness, width, height);
//free
//free(Mag);
//free(Dir);
//free(horizon);
//free(vertical);
//free(sup);
//free(hys);
free(edge);
hipFree(d_image);
// hipFree(d_image);
hipFree(d_temp_horizon);
hipFree(d_horizon);
hipFree(d_temp_vertical);
hipFree(d_vertical);
hipFree(d_Mag);
hipFree(d_Dir);
hipFree(d_sup);
hipFree(d_hys);
hipFree(d_edge);
gettimeofday(&end, NULL);
printf("BlockSize: %d Image-Height: %d Width: %d Sigma: %f file i/o time: %ld kernel time: %ld communication time: %ld end to end with i/o: %ld end to end with no i/o: %ld\n",blocksize,height,width,atof(argv[2]),(((fileOEnd.tv_sec *1000000 + fileOEnd.tv_usec)-(fileOStart.tv_sec * 1000000 + fileOStart.tv_usec))+((fileIEnd.tv_sec *1000000 + fileIEnd.tv_usec)-(fileIStart.tv_sec * 1000000 + fileIStart.tv_usec))),(((k1End.tv_sec *1000000 + k1End.tv_usec)-(k1Start.tv_sec * 1000000 + k1Start.tv_usec))+((k2End.tv_sec *1000000 + k2End.tv_usec)-(k2Start.tv_sec * 1000000 + k2Start.tv_usec))+((k3End.tv_sec *1000000 + k3End.tv_usec)-(k3Start.tv_sec * 1000000 + k3Start.tv_usec))+((k4End.tv_sec *1000000 + k4End.tv_usec)-(k4Start.tv_sec * 1000000 + k4Start.tv_usec))+((k5End.tv_sec *1000000 + k5End.tv_usec)-(k5Start.tv_sec * 1000000 + k5Start.tv_usec))+((k6End.tv_sec *1000000 + k6End.tv_usec)-(k6Start.tv_sec * 1000000 + k6Start.tv_usec))+((k7End.tv_sec *1000000 + k7End.tv_usec)-(k7Start.tv_sec * 1000000 + k7Start.tv_usec))),((H2DEnd.tv_sec *1000000 + H2DEnd.tv_usec)-(H2DStart.tv_sec * 1000000 + H2DStart.tv_usec)+(D2HEnd.tv_sec *1000000 + D2HEnd.tv_usec)-(D2HStart.tv_sec * 1000000 + D2HStart.tv_usec)),(end.tv_sec *1000000 + end.tv_usec)-(start.tv_sec * 1000000 + start.tv_usec),
(computationend.tv_sec *1000000 + computationend.tv_usec)-(computationstart.tv_sec * 1000000 + computationstart.tv_usec));
}*/
|
130e2a7ca41da9e8a12d4eb90da453a2e9e57ed9.cu
|
#include<stdio.h>
#include <string.h>
#include "stdlib.h"
#include<math.h>
#include"time.h"
#include"sys/time.h"
#include<cuda.h>
#include<thrust/device_vector.h>
#include<thrust/host_vector.h>
#include<thrust/sort.h>
#include<thrust/copy.h>
#include<mpi.h>
#include"canny_edge.h"
/*
struct location {
int locationI;
int locationJ;
};
int compare(const void *a, const void *b){
float c = *(float*)a;
float d = *(float*)b;
if(c < d) return -1;
else return 1;
}
void print_matrix(float *matrix, int height, int width){
for(int i=0; i<height; i++){
for(int j=0; j<width; j++){
printf("%.3f ", *(matrix+(i*width)+j));
}
printf("\n");
}
}
int range(int y, int x, int h, int w){
if(x < 0 || x >= w){
return 0;
}
else if(y < 0 || y >= h){
return 0;
}
else
return 1;
}
*/
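/* ghost_chunk exchanges halo rows between neighbouring MPI ranks so that each
rank's image strip is padded with 'a' rows from its neighbours before the
convolution. Note the assumption here: the buffers handed to MPI_Sendrecv are
device pointers, which only works with a CUDA-aware MPI build. */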
float *ghost_chunk(float *image, int height, int width, int a, int comm_size, int comm_rank){
MPI_Status status;
float *output;
if(comm_rank == 0 || comm_rank == (comm_size-1))
// output = (float*)malloc(sizeof(float)*width*(height + a));
cudaMalloc((void **)&output,sizeof(float)*width*(height + a));
else
cudaMalloc((void **)&output,sizeof(float)*width*(height + 2*a));
// output = (float*)malloc(sizeof(float)*width*(height + 2*a));
if(comm_rank == 0){
MPI_Sendrecv(image+width*(height-a), a*width, MPI_FLOAT, comm_rank+1, comm_rank,
output+width*height, a*width, MPI_FLOAT, comm_rank+1, comm_rank+1, MPI_COMM_WORLD, &status);
cudaMemcpy(output, image, sizeof(float)*width*height, cudaMemcpyDeviceToDevice); //both buffers live on the device
return output;
}else if(comm_rank == (comm_size-1)){
MPI_Sendrecv(image, a*width, MPI_FLOAT, comm_rank-1, comm_rank,
output, a*width, MPI_FLOAT, comm_rank-1, comm_rank-1, MPI_COMM_WORLD, &status);
cudaMemcpy(output+a*width, image, sizeof(float)*width*height, cudaMemcpyDeviceToDevice); //both buffers live on the device
return output+a*width;
}else{
//send top data to previous rank, receive top data from previous rank
MPI_Sendrecv(image, a*width, MPI_FLOAT, comm_rank-1, comm_rank,
output, a*width, MPI_FLOAT, comm_rank-1, comm_rank-1, MPI_COMM_WORLD, &status);
//send bottom data to next rank, receive top data from next rank
MPI_Sendrecv(image+width*(height-a), a*width, MPI_FLOAT, comm_rank+1,comm_rank,
output+width*(height+a), a*width, MPI_FLOAT, comm_rank+1, comm_rank+1, MPI_COMM_WORLD, &status);
cudaMemcpy(output+a*width, image, sizeof(float)*width*height, cudaMemcpyDeviceToDevice); //both buffers live on the device
return output+a*width;
}
}
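/* convolveGPU below applies a 1-D kernel of length max(k_height, k_width):
k_height > 1 walks the column direction and k_width > 1 walks the row
direction, so calling it twice gives a separable (Gaussian then derivative)
convolution. top/bottom extend the valid row range into the halo rows
exchanged by ghost_chunk above. */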
__global__
void convolveGPU(float *image, float *outputG, float *kernel, int height,int width, int k_height, int k_width, int top, int bottom){
//using global memory
int i,j,m,offseti,offsetj;
float kerw=(k_width>k_height)?k_width:k_height;
//printf("%f",kerw);
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height && j<width ){
float sum = 0;
for( m=0; m<kerw; m++){
offseti = k_height>1?(-1*(k_height/2)+m):0;
offsetj = k_width>1?(-1*(k_width/2)+m):0;
if( (i+offseti)>=0-top && (i+offseti)<height+bottom && (j+offsetj)>=0 && (j+offsetj)< width)
sum+= image[(i+offseti)*width+(j+offsetj)]*kernel[m];
}
outputG[i*width+j]=sum;
}
/*
//using shared memory
int m,offseti,offsetj;
float kerw=(k_width>k_height)?k_width:k_height;
//printf("%f",kerw);
int locaIx = threadIdx.x;
int locaIy = threadIdx.y;
int globIx = blockIdx.x*blockDim.x+ threadIdx.x;
int globIy = blockIdx.y*blockDim.y+ threadIdx.y;
//read global memory to shared memory
extern __shared__ float AShared[];
AShared[locaIx*blockDim.y+locaIy]=image[globIx*width+globIy];
__syncthreads();
if(globIx<height && globIy<width ){
float sum = 0;
for( m=0; m<kerw; m++){
offseti = k_height>1?(-1*(k_height/2)+m):0;
offsetj = k_width>1?(-1*(k_width/2)+m):0;
if( (locaIx+offseti)>=0 && (locaIx+offseti)< blockDim.x && (locaIy+offsetj)>=0 && (locaIy+offsetj)<blockDim.y)
sum+= AShared[(locaIx+offseti)*blockDim.y+(locaIy+offsetj)]*kernel[m];
else if((globIx+offseti) >= 0 && (globIx+offseti) < height && (globIy+offsetj)>=0 && (globIy+offsetj)<width)
sum+= image[(globIx+offseti)*width+(globIy+offsetj)]*kernel[m];
}
output[globIx*width+globIy]=sum;
}
*/
}
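/* The convolve wrapper below stages the image strip and the two 1-D kernels on
the device and would launch the two separable passes (currently commented out).
One caveat that applies to the launch configurations in this file:
ceil(height/blocksize) divides two ints before ceil runs, so the grid is exact
only when the image dimensions are multiples of blocksize. */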
void convolve(float *chunk_image,float *temp_horizon,float *gauss_Kernel,float *dgauss_Kernel,int height,int width, int k_height, int k_width ,int comm_size,int comm_rank,int blocksize){
/*
void convolve(float *image, float *d_temp_horizon, float *kernel, int height,
int width, int k_height, int k_width, int comm_size, int comm_rank, int blocksize){
*/
int w = k_height>1?k_height:k_width;
int a = floor(w/2);
float *d_temp_horizon,*d_temp_horizon_S,*d_chunk_image,*d_GKernel, *d_DKernel, *chunk_image_ghost, *chunk_image_ghost_S;
if(comm_rank == 0 || comm_rank == (comm_size-1))
cudaMalloc((void **)&d_chunk_image,sizeof(float)*width*(height+a));
else
cudaMalloc((void **)&d_chunk_image,sizeof(float)*width*(height+2*a));
cudaMalloc((void **)&d_DKernel,sizeof(float)*w);
cudaMalloc((void **)&d_temp_horizon,sizeof(float)*width*height);
cudaMalloc((void **)&d_GKernel,sizeof(float)*w);
cudaMalloc((void **)&d_temp_horizon_S,sizeof(float)*width*height);
if(comm_rank == 0 || comm_rank == (comm_size-1))
cudaMemcpy(d_chunk_image,chunk_image,sizeof(float)*width*(height+a),cudaMemcpyHostToDevice);
else
cudaMemcpy(d_chunk_image,chunk_image,sizeof(float)*width*(height+2*a),cudaMemcpyHostToDevice);
cudaMemcpy(d_GKernel,gauss_Kernel,sizeof(float)*w,cudaMemcpyHostToDevice);
cudaMemcpy(d_DKernel,dgauss_Kernel,sizeof(float)*w,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
int top, bottom;
if (comm_rank == 0){
top = 0;
bottom = floor(k_height/2);
}else if (comm_rank == comm_size-1){
top = floor(k_height/2);
bottom = 0;
}else{
top = floor(k_height/2);
bottom = top;
}
chunk_image_ghost = ghost_chunk(d_chunk_image, height, width, a ,comm_size, comm_rank);
// convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(chunk_image_ghost, d_temp_horizon, d_GKernel, height, width, k_height, k_width, top, bottom);
// chunk_image_ghost_S = ghost_chunk(d_temp_horizon, height, width, a ,comm_size, comm_rank);
// convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(chunk_image_ghost_S, d_temp_horizon_S, d_DKernel, height, width, k_width,k_height, top, bottom);
cudaMemcpy(temp_horizon,d_temp_horizon_S,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
}
__global__
void MagnitudeGPU(float *vertical, float *horizon, float *Mag, int height, int width){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height && j<width)
Mag[i*width+j]=sqrt(pow(vertical[i*width+j],2)+pow(horizon[i*width+j],2));
}
void Magnitude(float *vertical, float *horizon,float *d_Mag, int height, int width, int blocksize){
float *d_vertical,*d_horizon;
cudaMalloc((void **)&d_Mag,sizeof(float)*width*height);
cudaMalloc((void **)&d_vertical,sizeof(float)*width*height);
cudaMalloc((void **)&d_horizon,sizeof(float)*width*height);
cudaMemcpy(d_vertical,vertical,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemcpy(d_horizon,horizon,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
MagnitudeGPU<<<dimGrid,dimBlock>>>(d_vertical, d_horizon, d_Mag, height, width);
cudaDeviceSynchronize();
//cudaMemcpy(Mag,d_Mag,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(d_Mag);
cudaFree(d_vertical);
cudaFree(d_horizon);
}
__global__
void DirectionGPU(float *vertical, float *horizon, float *Dir, int height,int width){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height&&j<width)
Dir[i*width+j]=atan2(vertical[i*width+j],horizon[i*width+j]);
}
void Direction(float *vertical, float *horizon, float *d_Dir, int height,int width, int blocksize){
float *d_vertical,*d_horizon;
cudaMalloc((void **)&d_Dir,sizeof(float)*width*height);
cudaMalloc((void **)&d_vertical,sizeof(float)*width*height);
cudaMalloc((void **)&d_horizon,sizeof(float)*width*height);
cudaMemcpy(d_vertical,vertical,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemcpy(d_horizon,horizon,sizeof(float)*width*height,cudaMemcpyHostToDevice);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
DirectionGPU<<<dimGrid,dimBlock>>>(d_vertical, d_horizon, d_Dir, height, width);
//cudaMemcpy(Dir,d_Dir,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaFree(d_Dir);
cudaFree(d_vertical);
cudaFree(d_horizon);
}
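/* supressionGPU below performs non-maximum suppression: the gradient direction
is folded into [0, 180) degrees and quantized into four bins, and a pixel's
magnitude is zeroed if either neighbour along that direction has a larger
magnitude. */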
__global__
void supressionGPU(float *sup, float *Mag, float *Dir, int width, int height, int top, int bottom){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height+bottom &&j<width){
float angle = Dir[i*width+j];
if(angle<0) angle = angle + M_PI;
angle=(180/M_PI)*angle;
// top and bottom
if(angle > 157.5 || angle <= 22.5){
if (i-1 >= 0-top && i+1 < height+bottom) {
if (Mag[(i-1)*width+j]>Mag[i*width+j] || Mag[(i+1)*width+j]>Mag[i*width+j])
sup[i*width+j]=0;
}
}
// top left and bottom right
else if (angle>22.5 && angle<=67.5) {
if ( (i-1) >= 0-top && (j-1) >= 0){
if (Mag[(i-1)*width+(j-1)] > Mag[i*width+j]){
sup[i*width+j]=0;
}else if((i+1<height+bottom && j+1 <width)){
if(Mag[(i+1)*width+(j+1)]>Mag[i*width+j])
sup[i*width+j]=0;
}}}
//left and right
else if(angle>67.5 && angle<=112.5){
if (j-1 >= 0 && j+1 < width) {
if (Mag[i*width+(j-1)]>Mag[i*width+j] || Mag[i*width+(j+1)]>Mag[i*width+j]) {
sup[i*width+j]=0;
}
}
}
// left bottom and right top
else if(angle>112.5 && angle<=157.5){
if ((j-1 >= 0 && i-1 >= 0-top ) &&(i+1 < height+bottom && j+1 < width)) {
if (Mag[(i+1)*width+(j-1)]>Mag[i*width+j] || Mag[(i-1)*width+(j+1)]>Mag[i*width+j]) {
sup[i*width+j]=0;
}
}
}
}
}
void supression(float *d_sup, float *Mag, float *Dir, int width, int height, int comm_rank, int comm_size, int blocksize, int a){
float *d_Mag,*d_Dir;
cudaMalloc((void **)&d_Dir,sizeof(float)*width*height);
cudaMalloc((void **)&d_sup,sizeof(float)*width*height);
if(comm_rank == 0 || comm_rank == (comm_size-1))
cudaMalloc((void **)&d_Mag,sizeof(float)*width*(height+a));
else
cudaMalloc((void **)&d_Mag,sizeof(float)*width*(height+2*a));
cudaMemcpy(d_Dir,Dir,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemcpy(d_sup,Mag,sizeof(float)*width*height,cudaMemcpyHostToDevice);
if(comm_rank == 0 || comm_rank == (comm_size-1))
cudaMemcpy(d_Mag,Mag,sizeof(float)*width*(height+a),cudaMemcpyHostToDevice);
else
cudaMemcpy(d_Mag,Mag,sizeof(float)*width*(height+2*a),cudaMemcpyHostToDevice);
int top, bottom;
if (comm_rank == 0){
top = 0;
bottom = 1;
}else if (comm_rank == comm_size-1){
top = 1;
bottom = 0;
}else{
top = 1;
bottom = 1;
}
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
supressionGPU<<<dimGrid,dimBlock>>>(d_sup, d_Mag, d_Dir, height, width, top, bottom);
// cudaMemcpy(sup,d_sup,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaFree(d_Dir);
cudaFree(d_Mag);
cudaFree(d_sup);
}
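/* hysteresisGPU below applies double thresholding: magnitudes >= t_high become
strong edges (255), <= t_low are suppressed (0), and everything in between is
marked as a weak edge (125) for FinaledgeGPU to resolve by checking whether a
strong edge lies in its 3x3 neighbourhood. */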
__global__
void hysteresisGPU(float *sup, float *hys, int height, int width, float t_high, float t_low){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
if(i<height && j <width){
if(sup[i*width+j]>=t_high)
hys[i*width+j]=255;
else if(sup[i*width+j]<=t_low)
hys[i*width+j]=0;
else if(sup[i*width+j]<t_high && sup[i*width+j]>t_low)
hys[i*width+j]=125;
}
}
void hysteresis(float *sup, float *d_hys, int height, int width, float t_high, float t_low, int blocksize){
float *d_sup;
cudaMalloc((void **)&d_sup,sizeof(float)*width*height);
cudaMalloc((void **)&d_hys,sizeof(float)*width*height);
cudaMemcpy(d_sup,sup,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemcpy(d_hys,sup,sizeof(float)*width*height,cudaMemcpyHostToDevice);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
hysteresisGPU<<<dimGrid,dimBlock>>>(d_sup, d_hys, height, width, t_high, t_low);
// cudaMemcpy(hys,d_hys,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaFree(d_hys);
cudaFree(d_sup);
}
__global__
void FinaledgeGPU(float *edge, float *hys, int height, int width, int top, int bottom ){
int i,j;
i=threadIdx.x+blockIdx.x*blockDim.x;
j=threadIdx.y+blockIdx.y*blockDim.y;
//edge[i*width+j]=hys[i*width+j];
for (int y=-1; y<=1; y++){
for (int x=-1; x<=1; x++){
if(i+y<height+bottom && i+y>0-top && j+x<width && j+x> 0){
if (hys[(i+y)*width+x+j]==255)
edge[i*width+j]=255;
else
edge[i*width+j]=0;
}
}
}
}
void Finaledge(float *d_edge, float *hys, int height, int width, int comm_size, int comm_rank, int blocksize){
int top, bottom;
if (comm_rank == 0){
top = 0;
bottom = 1;
}else if (comm_rank == comm_size-1){
top = 1;
bottom = 0;
}else{
top = 1;
bottom = 1;
}
float *d_hys;
cudaMalloc((void **)&d_edge,sizeof(float)*width*height);
cudaMalloc((void **)&d_hys,sizeof(float)*width*height);
cudaMemcpy(d_edge,hys,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemcpy(d_hys,hys,sizeof(float)*width*height,cudaMemcpyHostToDevice);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
FinaledgeGPU<<<dimGrid,dimBlock>>>(d_edge, d_hys, height, width, top, bottom);
// cudaMemcpy(edge,d_edge,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaFree(d_edge);
cudaFree(d_hys);
}
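/* feature_detecGPU below computes the Harris corner response over a 7x7 window
around each pixel: R = Ixx*Iyy - (IxIy)^2 - k*(Ixx+Iyy)^2 with k = 0.04, where
Ixx, Iyy and IxIy are sums of products of the vertical and horizontal
gradients. Window values are read from shared memory when they fall inside the
current block. */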
__global__
void feature_detecGPU(float *d_cornerness,int height, int width, float *C_ver, float *C_hor, int blocksize, int top, int bottom){
//float k = 0.04;
int window_width = 7;
//float *cornerness = (float*)malloc(sizeof(float)*height*width);
//float *C_hor = (float*)malloc(sizeof(float)*height*width);
//float *C_ver = (float*)malloc(sizeof(float)*height*width);
int locaIx = threadIdx.x;
int locaIy = threadIdx.y;
int globIx = threadIdx.x+blockIdx.x*blockDim.x;
int globIy = threadIdx.y+blockIdx.y*blockDim.y;
float Ixx,Iyy,IxIy;
extern __shared__ float Ashared[];
__shared__ float *Vshared, *Hshared;
Vshared = Ashared;
Hshared = Ashared+blocksize*blocksize;
Vshared[locaIx*blockDim.y+locaIy] = C_ver[globIx*width+globIy];
Hshared[locaIx*blockDim.y+locaIy] = C_hor[globIx*width+globIy];
__syncthreads();
Ixx = 0;
Iyy = 0;
IxIy = 0;
if(globIx <height+bottom && globIy < width){
for(int k = -window_width/2; k < window_width/2 ; k++){
for(int m = -window_width/2; m < window_width/2 ; m++){
if(locaIx+k >= 0 && locaIx+k < blockDim.x && locaIy+m >= 0 && locaIy+m < blockDim.y){
int offseti = locaIx+k;
int offsetj = locaIy+m;
Ixx = Ixx + pow(Vshared[offseti*blockDim.y+offsetj],2);
Iyy = Iyy + pow(Hshared[offseti*blockDim.y+offsetj],2);
IxIy = IxIy + Vshared[offseti*blockDim.y+offsetj] * Hshared[offseti*blockDim.y+offsetj];
}
else if(globIx+k >= 0-top && globIx+k < height+bottom && globIy+m >= 0 && globIy+m < width){
int offseti = globIx+k;
int offsetj = globIy+m;
Ixx = Ixx + pow(C_ver[offseti*width+offsetj],2);
Iyy = Iyy + pow(C_hor[offseti*width+offsetj],2);
IxIy = IxIy + C_ver[offseti*width+offsetj] * C_hor[offseti*width+offsetj];
}
}
}
__syncthreads();
d_cornerness[globIx*width+globIy]= (Ixx*Iyy) - (IxIy*IxIy) - 0.04*(Ixx+Iyy)*(Ixx+Iyy);
printf("test");
}
}
void feature_detec(float *feature, int height, int width, float *vertical, float *horizon, int comm_size, int comm_rank, int blocksize, int a){
float *d_ver,*d_hor,*d_feature;
int top, bottom;
if (comm_rank == 0){
top = 0;
bottom = 1;
}else if (comm_rank == comm_size-1){
top = 1;
bottom = 0;
}else{
top = 1;
bottom = 1;
}
cudaMalloc((void **)&d_feature,sizeof(float)*width*height);
if(comm_rank == 0 || comm_rank == (comm_size-1)){
cudaMalloc((void **)&d_ver,sizeof(float)*width*(height+a));
cudaMalloc((void **)&d_hor,sizeof(float)*width*(height+a));
}else{
cudaMalloc((void **)&d_ver,sizeof(float)*width*(height+2*a));
cudaMalloc((void **)&d_hor,sizeof(float)*width*(height+2*a));
}
if(comm_rank == 0 || comm_rank == (comm_size-1)){
cudaMemcpy(d_ver,vertical,sizeof(float)*width*(height+a),cudaMemcpyHostToDevice);
cudaMemcpy(d_hor,horizon,sizeof(float)*width*(height+a),cudaMemcpyHostToDevice);
}else{
cudaMemcpy(d_ver,vertical,sizeof(float)*width*(height+2*a),cudaMemcpyHostToDevice);
cudaMemcpy(d_hor,horizon,sizeof(float)*width*(height+2*a),cudaMemcpyHostToDevice);
}
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
feature_detecGPU<<<dimGrid,dimBlock,2*sizeof(float)*blocksize*blocksize>>>(d_feature, height, width, d_ver, d_hor, blocksize, top , bottom); //the kernel needs 2*blocksize*blocksize floats of dynamic shared memory
cudaMemcpy(feature,d_feature,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(d_ver);
cudaFree(d_hor);
cudaFree(d_feature);
}
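/* find_featureGPU below reduces each block's tile of cornerness values to its
maximum, carrying the flat pixel index (i*width + j) alongside the value, and
thread (0,0) of the block writes the winning index out as that block's
feature. */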
__global__
void find_featureGPU(float *output, float *d_cornerness,int height, int width, int blocksize){
printf("here\n");
//int locatI,locatJ;
int stride,localIndex;
// struct location *loc;
int window_size = blockDim.x;
//int window_width = blockDim.y;
int locaIx = threadIdx.x;
int locaIy = threadIdx.y;
//int Index_locaIx = blocksize*blocksize+threadIdx.x;
//int Index_locaIy = blocksize*blocksize+threadIdx.y;
int globIx = threadIdx.x+blockIdx.x*blockDim.x;
int globIy = threadIdx.y+blockIdx.y*blockDim.y;
extern __shared__ float Shared[];
__shared__ float *AShared,*indexShared;
AShared = Shared;
indexShared = Shared+blocksize*blocksize;
//float kerw=(k_width>k_height)?k_width:k_height;
AShared[locaIx*blockDim.y+locaIy] = d_cornerness[globIx*width+globIy];
indexShared[locaIx*blockDim.y+locaIy] = globIx*width+globIy;
//int a = indexShared[locaIx*blockDim.y+locaIy];
//printf("a; %d ",a);
__syncthreads();
// loc = (struct location*)malloc(sizeof(struct location)*window_heigh*window_width);
// int locount = 0;
//printf("%d ",AShared+locaIx*blockDim.y+locaIy);
// if(globIx < height && globIy <width){
for (stride = ((window_size*window_size)/2);stride >= 1; stride/=2){
__syncthreads();
localIndex = locaIx*blockDim.y+locaIy;
if(localIndex < stride){
if(AShared[localIndex]<AShared[localIndex+stride]){
AShared[localIndex]=AShared[localIndex+stride];
indexShared[localIndex]=indexShared[localIndex+stride];
// }else if(AShared[localIndex]<AShared[localIndex+stride])
// AShared[localIndex]=AShared[localIndex+stride];
// indexShared[localIndex]=localIndex+stride ;
}
}
}
if(locaIx == 0 && locaIy == 0){
output[globIx*width+globIy]=indexShared[0];
int a = indexShared[0];
printf("%d",a);
}
}
void find_feature(float *output,float *temp_feature,int height,int width,int comm_size, int comm_rank,int blocksize){
float *d_temp_feature, *d_output;
cudaMalloc((void **)&d_output,sizeof(float)*width*height);
cudaMalloc((void **)&d_temp_feature,sizeof(float)*width*height);
cudaMemcpy(d_temp_feature,temp_feature,sizeof(float)*width*height,cudaMemcpyHostToDevice);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
find_featureGPU<<<dimGrid,dimBlock,2*sizeof(float)*blocksize*blocksize>>>(d_output, d_temp_feature, height, width,blocksize);
cudaMemcpy(output,d_output,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
//cudaFree(d_output);
// cudaFree(d_temp_feature);
}
/*
int main(int argc, char ** argv){
int blocksize = atof(argv[3]);
int height,width, w;
float *image,sigma, a;
float *gauss_Kernel,*dgauss_Kernel;
//float *horizon;
// float *vertical;
float *Mag,*sup,*Dir,*hys;
float *edge;
//create pointers for GPU
cudaSetDevice(0); //use GPU 0
float *d_image;
float *d_gauss_Kernel,*d_dgauss_Kernel;
float *d_temp_horizon,*d_horizon;
float *d_vertical, *d_temp_vertical;
float *d_Mag,*d_Dir,*d_sup, *d_hys, *d_edge;
float *d_cornerness,*cornerness,*d_features,*features;
struct timeval fileIStart,fileIEnd,fileOStart,fileOEnd,k1Start,k1End,k2Start,k2End,k3Start,k3End,k4Start,k4End,k5Start,k5End,k6Start,k6End,k7Start,k7End,k8Start,k8End,k9Start,k9End,H2DStart,H2DEnd,D2HStart,D2HEnd,start, end, computationstart,computationend;
gettimeofday(&start, NULL);
//for file input timer
gettimeofday(&fileIStart,NULL);
read_image_template(argv[1],&image,&width,&height);
gettimeofday(&fileIEnd, NULL);
gettimeofday(&computationstart, NULL);
sigma = atof(argv[2]);
a = round(2.5*sigma-0.5);
w = 2*a+1;
//printf("a:%f w:%d sigma: %f \n",a,w,sigma);
//Malloc for CPU
gauss_Kernel=(float*)malloc(sizeof(float)*w);
dgauss_Kernel=(float*)malloc(sizeof(float)*w);
//temp_horizon=(float *)malloc(sizeof(float)*width*height);
//horizon=(float *)malloc(sizeof(float)*width*height);
//temp_vertical=(float *)malloc(sizeof(float)*width*height);
//vertical=(float *)malloc(sizeof(float)*width*height);
//Mag=(float *)malloc(sizeof(float)*width*height);
//Dir=(float *)malloc(sizeof(float)*width*height);
sup=(float *)malloc(sizeof(float)*width*height);
//hys=(float *)malloc(sizeof(float)*width*height);
edge=(float *)malloc(sizeof(float)*width*height);
cornerness = (float*)malloc(sizeof(float)*height*width);
features = (float*)malloc(sizeof(float)*height*width);
//Malloc for GPU
cudaMalloc((void **)&d_image,sizeof(float)*width*height);
cudaMalloc((void **)&d_gauss_Kernel,sizeof(float)*w);
cudaMalloc((void **)&d_dgauss_Kernel,sizeof(float)*w);
cudaMalloc((void **)&d_temp_horizon,sizeof(float)*width*height);
cudaMalloc((void **)&d_horizon,sizeof(float)*width*height);
cudaMalloc((void **)&d_temp_vertical,sizeof(float)*width*height);
cudaMalloc((void **)&d_vertical,sizeof(float)*width*height);
cudaMalloc((void **)&d_Mag,sizeof(float)*width*height);
cudaMalloc((void **)&d_Dir,sizeof(float)*width*height);
cudaMalloc((void **)&d_sup,sizeof(float)*width*height);
cudaMalloc((void **)&d_hys,sizeof(float)*width*height);
cudaMalloc((void **)&d_edge,sizeof(float)*width*height);
cudaMalloc((void **)&d_cornerness,sizeof(float)*width*height);
cudaMalloc((void **)&d_features,sizeof(float)*width*height);
Cal_gauss_kernel(gauss_Kernel,sigma,a, w, dgauss_Kernel);
//printf("Gaussian Kernel:\n");
//print_matrix(gauss_Kernel, 1, w);
//printf("Derivative Kernel:\n");
//print_matrix(dgauss_Kernel,1,w);
//copy data from CPU to GPU
gettimeofday(&H2DStart, NULL);
cudaMemcpy(d_image,image,sizeof(float)*width*height,cudaMemcpyHostToDevice);
cudaMemcpy(d_gauss_Kernel,gauss_Kernel,sizeof(float)*w,cudaMemcpyHostToDevice);
cudaMemcpy(d_dgauss_Kernel,dgauss_Kernel,sizeof(float)*w,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
gettimeofday(&H2DEnd, NULL);
//Horizonal gradient
//int blocksize = atof(argv[3]);
dim3 dimBlock(blocksize,blocksize,1);
dim3 dimGrid(ceil(height/blocksize),ceil(width/blocksize),1);
gettimeofday(&k1Start, NULL);
convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(d_image, d_temp_horizon, d_gauss_Kernel, height, width, w, 1 );
convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(d_temp_horizon, d_horizon, d_dgauss_Kernel, height, width, 1, w);
cudaDeviceSynchronize();
gettimeofday(&k1End, NULL);
//convolve(image, &temp_horizon, gauss_Kernel, height, width, w, 1 );
//convolve(temp_horizon, &horizon, dgauss_Kernel, height, width, 1, w);
//cudaMemcpy(horizon,d_horizon,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
//Vertical gradient
//convolve(image, &temp_vertical, gauss_Kernel, height, width,1, k_w);
//convolve(temp_vertical, &vertical, dgauss_Kernel, height, width, k_w, 1);
gettimeofday(&k2Start, NULL);
convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(d_image, d_temp_vertical, d_gauss_Kernel, height, width, 1,w );
convolveGPU<<<dimGrid,dimBlock,sizeof(float)*blocksize*blocksize>>>(d_temp_horizon, d_vertical, d_dgauss_Kernel, height, width, w,1);
cudaDeviceSynchronize();
gettimeofday(&k2End, NULL);
//cudaMemcpy(vertical, d_vertical,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
// Magnitude
gettimeofday(&k3Start, NULL);
//Magnitude(vertical, horizon, &Mag, height, width);
MagnitudeGPU<<<dimGrid,dimBlock>>>(d_vertical, d_horizon, d_Mag, height, width);
cudaDeviceSynchronize();
gettimeofday(&k3End, NULL);
//cudaMemcpy(Mag, d_Mag, sizeof(float)*width*height,cudaMemcpyDeviceToHost);
// Direction
gettimeofday(&k4Start, NULL);
//Direction(vertical, horizon, &Dir, height, width);
DirectionGPU<<<dimGrid,dimBlock>>>(d_vertical, d_horizon, d_Dir, height, width);
cudaDeviceSynchronize();
gettimeofday(&k4End, NULL);
//cudaMemcpy(Dir, d_Dir, sizeof(float)*width*height,cudaMemcpyDeviceToHost);
// supression
//supression (&sup, Mag, Dir, height, width);
gettimeofday(&k5Start, NULL);
supressionGPU<<<dimGrid,dimBlock>>>(d_sup, d_Mag, d_Dir, height, width);
cudaDeviceSynchronize();
gettimeofday(&k5End, NULL);
//cudaMemcpy(sup, d_sup, sizeof(float)*width*height,cudaMemcpyDeviceToHost);
// hysteresis
thrust::device_ptr<float>thr_d(d_sup);
thrust::device_vector<float>d_sup_vec(thr_d,thr_d+(height*width));
thrust::sort(d_sup_vec.begin(),d_sup_vec.end());
int index = (int)((0.9)*height*width);
float t_high = d_sup_vec[index];
float t_low = t_high/5;
gettimeofday(&k6Start, NULL);
hysteresisGPU<<<dimGrid,dimBlock>>>(d_sup, d_hys, height, width, t_high, t_low);
cudaDeviceSynchronize();
//cudaMemcpy(hys,d_hys,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
gettimeofday(&k6End, NULL);
// Finaledge
gettimeofday(&k7Start, NULL);
FinaledgeGPU<<<dimGrid,dimBlock>>>(d_edge, d_hys, height, width);
cudaDeviceSynchronize();
gettimeofday(&k7End, NULL);
gettimeofday(&D2HStart, NULL);
cudaMemcpy(edge,d_edge,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
gettimeofday(&D2HEnd, NULL);
// feature
gettimeofday(&k8Start, NULL);
feature_detecGPU<<<dimGrid,dimBlock,2*sizeof(float)*blocksize*blocksize>>>(d_cornerness,height, width, d_vertical, d_horizon, blocksize);
cudaDeviceSynchronize();
gettimeofday(&k8End, NULL);
//cudaMemcpy(cornerness,d_cornerness,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
// gettimeofday(&k9Start, NULL);
gettimeofday(&k9Start, NULL);
find_featureGPU<<<dimGrid,dimBlock,2*sizeof(float)*blocksize*blocksize>>>(d_features,d_cornerness,height, width,blocksize);
cudaDeviceSynchronize();
gettimeofday(&k9End, NULL);
cudaMemcpy(features,d_features,sizeof(float)*width*height,cudaMemcpyDeviceToHost);
gettimeofday(&computationend, NULL);
// FILE *T0;
// T0=fopen("index.csv","w+");
int location_I, location_J;
for(int i = 0; i<width*height; i++){
if (features[i]>0){
int a = *(features+i);
location_I = a/width;
location_J = a%width;
printf("Index:%d, I:%d, J:%d\n",a,location_I,location_J);
}
}
// fclose(T0);
//output image
//write_image_template("h_convolve.pgm",horizon, width, height);
//write_image_template("v_convolve.pgm",vertical, width, height);
//write_image_template("Magnitude.pgm",Mag, width, height);
//write_image_template("Direction.pgm",Dir, width, height);
//write_image_template("suppress.pgm", sup, width, height);
//write_image_template("hysteresis.pgm", hys, width, height);
gettimeofday(&fileOStart,NULL);
write_image_template("edge.pgm", edge, width, height);
gettimeofday(&fileOEnd, NULL);
//write_image_template("cornerness.pgm", cornerness, width, height);
//free
//free(Mag);
//free(Dir);
//free(horizon);
//free(vertical);
//free(sup);
//free(hys);
free(edge);
cudaFree(d_image);
// cudaFree(d_image);
cudaFree(d_temp_horizon);
cudaFree(d_horizon);
cudaFree(d_temp_vertical);
cudaFree(d_vertical);
cudaFree(d_Mag);
cudaFree(d_Dir);
cudaFree(d_sup);
cudaFree(d_hys);
cudaFree(d_edge);
gettimeofday(&end, NULL);
printf("BlockSize: %d Image-Height: %d Width: %d Sigma: %f file i/o time: %ld kernel time: %ld communication time: %ld end to end with i/o: %ld end to end with no i/o: %ld\n",blocksize,height,width,atof(argv[2]),(((fileOEnd.tv_sec *1000000 + fileOEnd.tv_usec)-(fileOStart.tv_sec * 1000000 + fileOStart.tv_usec))+((fileIEnd.tv_sec *1000000 + fileIEnd.tv_usec)-(fileIStart.tv_sec * 1000000 + fileIStart.tv_usec))),(((k1End.tv_sec *1000000 + k1End.tv_usec)-(k1Start.tv_sec * 1000000 + k1Start.tv_usec))+((k2End.tv_sec *1000000 + k2End.tv_usec)-(k2Start.tv_sec * 1000000 + k2Start.tv_usec))+((k3End.tv_sec *1000000 + k3End.tv_usec)-(k3Start.tv_sec * 1000000 + k3Start.tv_usec))+((k4End.tv_sec *1000000 + k4End.tv_usec)-(k4Start.tv_sec * 1000000 + k4Start.tv_usec))+((k5End.tv_sec *1000000 + k5End.tv_usec)-(k5Start.tv_sec * 1000000 + k5Start.tv_usec))+((k6End.tv_sec *1000000 + k6End.tv_usec)-(k6Start.tv_sec * 1000000 + k6Start.tv_usec))+((k7End.tv_sec *1000000 + k7End.tv_usec)-(k7Start.tv_sec * 1000000 + k7Start.tv_usec))),((H2DEnd.tv_sec *1000000 + H2DEnd.tv_usec)-(H2DStart.tv_sec * 1000000 + H2DStart.tv_usec)+(D2HEnd.tv_sec *1000000 + D2HEnd.tv_usec)-(D2HStart.tv_sec * 1000000 + D2HStart.tv_usec)),(end.tv_sec *1000000 + end.tv_usec)-(start.tv_sec * 1000000 + start.tv_usec),
(computationend.tv_sec *1000000 + computationend.tv_usec)-(computationstart.tv_sec * 1000000 + computationstart.tv_usec));
}*/
|
c96a19541feb396c36cce5459b478251c73688df.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <helper_cuda.h>
#include <rocblas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
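    // The "N" format passes dict to the new tuple without incrementing its reference
    // count (ownership is transferred), so no separate Py_DECREF of dict is needed.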
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
|
c96a19541feb396c36cce5459b478251c73688df.cu
|
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Python.h>
#include <arrayobject.h>
#include <assert.h>
#include <helper_cuda.h>
#include <cublas.h>
#include <time.h>
#include <vector>
#include <matrix.h>
#include <queue.h>
#include <worker.cuh>
#include <util.cuh>
#include <cost.cuh>
#include <pyconvnet.cuh>
#include <convnet.cuh>
using namespace std;
static ConvNet* model = NULL;
static PyMethodDef _ConvNetMethods[] = { { "initModel", initModel, METH_VARARGS },
{ "startBatch", startBatch, METH_VARARGS },
{ "finishBatch", finishBatch, METH_VARARGS },
{ "checkGradients", checkGradients, METH_VARARGS },
{ "startMultiviewTest", startMultiviewTest, METH_VARARGS },
{ "startFeatureWriter", startFeatureWriter, METH_VARARGS },
{ "syncWithHost", syncWithHost, METH_VARARGS },
{ NULL, NULL }
};
#if defined(_WIN64) || defined(_WIN32)
extern "C" __declspec(dllexport) void initpyconvnet() {
(void) Py_InitModule("pyconvnet", _ConvNetMethods);
import_array();
}
#else
void INITNAME() {
(void) Py_InitModule(QUOTEME(MODELNAME), _ConvNetMethods);
import_array();
}
#endif
PyObject* initModel(PyObject *self, PyObject *args) {
assert(model == NULL);
PyListObject* pyLayerParams;
int pyMinibatchSize;
int pyDeviceID;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &pyLayerParams,
&pyMinibatchSize,
&pyDeviceID)) {
return NULL;
}
model = new ConvNet(pyLayerParams,
pyMinibatchSize,
pyDeviceID);
model->start();
return Py_BuildValue("i", 0);
}
/*
* Starts training/testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int test = 0;
if (!PyArg_ParseTuple(args, "O!|i",
&PyList_Type, &data,
&test)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
TrainingWorker* wr = new TrainingWorker(*model, *new CPUData(mvec), test);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Starts testing on the given batch (asynchronous -- returns immediately).
*/
PyObject* startMultiviewTest(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int numViews, logregIdx;
if (!PyArg_ParseTuple(args, "O!ii",
&PyList_Type, &data,
&numViews,
&logregIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
MultiviewTestWorker* wr = new MultiviewTestWorker(*model, *new CPUData(mvec), numViews, logregIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
PyObject* startFeatureWriter(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
int layerIdx;
if (!PyArg_ParseTuple(args, "O!i",
&PyList_Type, &data,
&layerIdx)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
Matrix& ftrs = *mvec.back();
mvec.pop_back();
FeatureWorker* wr = new FeatureWorker(*model, *new CPUData(mvec), ftrs, layerIdx);
model->getWorkerQueue().enqueue(wr);
return Py_BuildValue("i", 0);
}
/*
* Waits for the trainer to finish training on the batch given to startBatch.
*/
PyObject* finishBatch(PyObject *self, PyObject *args) {
assert(model != NULL);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
Cost& cost = res->getResults();
PyObject* dict = PyDict_New();
CostMap& costMap = cost.getCostMap();
for (CostMap::const_iterator it = costMap.begin(); it != costMap.end(); ++it) {
PyObject* v = PyList_New(0);
for (vector<double>::const_iterator iv = it->second->begin(); iv != it->second->end(); ++iv) {
PyObject* f = PyFloat_FromDouble(*iv);
PyList_Append(v, f);
}
PyDict_SetItemString(dict, it->first.c_str(), v);
}
PyObject* retVal = Py_BuildValue("Ni", dict, cost.getNumCases());
delete res; // Deletes cost too
return retVal;
}
PyObject* checkGradients(PyObject *self, PyObject *args) {
assert(model != NULL);
PyListObject* data;
if (!PyArg_ParseTuple(args, "O!",
&PyList_Type, &data)) {
return NULL;
}
MatrixV& mvec = *getMatrixV((PyObject*)data);
GradCheckWorker* wr = new GradCheckWorker(*model, *new CPUData(mvec));
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::BATCH_DONE);
delete res;
return Py_BuildValue("i", 0);
}
/*
* Copies weight matrices from GPU to system memory.
*/
PyObject* syncWithHost(PyObject *self, PyObject *args) {
assert(model != NULL);
SyncWorker* wr = new SyncWorker(*model);
model->getWorkerQueue().enqueue(wr);
WorkResult* res = model->getResultQueue().dequeue();
assert(res != NULL);
assert(res->getResultType() == WorkResult::SYNC_DONE);
delete res;
return Py_BuildValue("i", 0);
}
|
cbd94a1a9726935e3cfc5ded89f628cfda86fb74.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: dnlebard
#include "HarmonicImproperForceGPU.cuh"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
// SMALL a relatively small number
#define SMALL 0.001f
/*! \file HarmonicImproperForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic improper forces. Used by HarmonicImproperForceComputeGPU.
*/
//! Texture for reading improper parameters
texture<float2, 1, hipReadModeElementType> improper_params_tex;
//! Kernel for calculating harmonic improper forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial
\param N number of particles
\param d_pos Device memory of particle positions
\param box Box dimensions for periodic boundary condition handling
\param tlist Improper data to use in calculating the forces
\param dihedral_ABCD List of relative atom positions in the dihedrals
\param pitch Pitch of 2D dihedral list
\param n_dihedrals_list List of numbers of dihedrals per atom
*/
extern "C" __global__
void gpu_compute_harmonic_improper_forces_kernel(float4* d_force,
float* d_virial,
const unsigned int virial_pitch,
unsigned int N,
const Scalar4 *d_pos,
BoxDim box,
const uint4 *tlist,
const uint1 *dihedral_ABCD,
const unsigned int pitch,
const unsigned int *n_dihedrals_list)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_impropers = n_dihedrals_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
float4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c-d quartet
float3 idx_pos = make_float3(idx_postype.x, idx_postype.y, idx_postype.z);
float3 pos_a,pos_b,pos_c, pos_d; // allocate space for the a,b, and c atoms in the a-b-c-d quartet
// initialize the force to 0
float4 force_idx = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// initialize the virial to 0
float virial_idx[6];
for (int i = 0; i < 6; i++)
virial_idx[i] = 0.0f;
// loop over all impropers
for (int improper_idx = 0; improper_idx < n_impropers; improper_idx++)
{
uint4 cur_improper = tlist[pitch*improper_idx + idx];
uint1 cur_ABCD = dihedral_ABCD[pitch*improper_idx + idx];
int cur_improper_x_idx = cur_improper.x;
int cur_improper_y_idx = cur_improper.y;
int cur_improper_z_idx = cur_improper.z;
int cur_improper_type = cur_improper.w;
int cur_improper_abcd = cur_ABCD.x;
// get the a-particle's position (MEM TRANSFER: 16 bytes)
float4 x_postype = d_pos[cur_improper_x_idx];
float3 x_pos = make_float3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
float4 y_postype = d_pos[cur_improper_y_idx];
float3 y_pos = make_float3(y_postype.x, y_postype.y, y_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
float4 z_postype = d_pos[cur_improper_z_idx];
float3 z_pos = make_float3(z_postype.x, z_postype.y, z_postype.z);
if (cur_improper_abcd == 0)
{
pos_a = idx_pos;
pos_b = x_pos;
pos_c = y_pos;
pos_d = z_pos;
}
if (cur_improper_abcd == 1)
{
pos_b = idx_pos;
pos_a = x_pos;
pos_c = y_pos;
pos_d = z_pos;
}
if (cur_improper_abcd == 2)
{
pos_c = idx_pos;
pos_a = x_pos;
pos_b = y_pos;
pos_d = z_pos;
}
if (cur_improper_abcd == 3)
{
pos_d = idx_pos;
pos_a = x_pos;
pos_b = y_pos;
pos_c = z_pos;
}
// calculate dr for a-b,c-b,and a-c
float3 dab = pos_a - pos_b;
float3 dcb = pos_c - pos_b;
float3 ddc = pos_d - pos_c;
dab = box.minImage(dab);
dcb = box.minImage(dcb);
ddc = box.minImage(ddc);
// get the improper parameters (MEM TRANSFER: 12 bytes)
float2 params = tex1Dfetch(improper_params_tex, cur_improper_type);
float K = params.x;
float chi = params.y;
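        // K is the improper force constant and chi the equilibrium improper angle;
        // with the 1/2 and per-atom 1/4 factors applied below the total contribution
        // is U = (K/2) * (angle - chi)^2, split evenly over the four atoms.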
float r1 = rsqrtf(dot(dab, dab));
float r2 = rsqrtf(dot(dcb, dcb));
float r3 = rsqrtf(dot(ddc, ddc));
float ss1 = r1 * r1;
float ss2 = r2 * r2;
float ss3 = r3 * r3;
// Cosine and Sin of the angle between the planes
float c0 = dot(dab, ddc) * r1 * r3;
float c1 = dot(dab, dcb) * r1 * r2;
float c2 = -dot(ddc, dcb) * r3 * r2;
float s1 = 1.0f - c1*c1;
if (s1 < SMALL) s1 = SMALL;
s1 = 1.0f / s1;
float s2 = 1.0f - c2*c2;
if (s2 < SMALL) s2 = SMALL;
s2 = 1.0f / s2;
float s12 = sqrtf(s1*s2);
float c = (c1*c2 + c0) * s12;
if (c > 1.0f) c = 1.0f;
if (c < -1.0f) c = -1.0f;
float s = sqrtf(1.0f - c*c);
if (s < SMALL) s = SMALL;
float domega = acosf(c) - chi;
float a = K * domega;
// calculate the energy, 1/4th for each atom
//float improper_eng = 0.25*a*domega;
float improper_eng = 0.125f*a*domega; // the .125 term is 1/2 * 1/4
//a = -a * 2.0/s;
a = -a /s; // the missing 2.0 factor is to ensure K/2 is factored in for the forces
c = c * a;
s12 = s12 * a;
float a11 = c*ss1*s1;
float a22 = -ss2 * (2.0f*c0*s12 - c*(s1+s2));
float a33 = c*ss3*s2;
float a12 = -r1*r2*(c1*c*s1 + c2*s12);
float a13 = -r1*r3*s12;
float a23 = r2*r3*(c2*c*s2 + c1*s12);
float sx2 = a22*dcb.x + a23*ddc.x + a12*dab.x;
float sy2 = a22*dcb.y + a23*ddc.y + a12*dab.y;
float sz2 = a22*dcb.z + a23*ddc.z + a12*dab.z;
// calculate the forces for each particle
float ffax = a12*dcb.x + a13*ddc.x + a11*dab.x;
float ffay = a12*dcb.y + a13*ddc.y + a11*dab.y;
float ffaz = a12*dcb.z + a13*ddc.z + a11*dab.z;
float ffbx = -sx2 - ffax;
float ffby = -sy2 - ffay;
float ffbz = -sz2 - ffaz;
float ffdx = a23*dcb.x + a33*ddc.x + a13*dab.x;
float ffdy = a23*dcb.y + a33*ddc.y + a13*dab.y;
float ffdz = a23*dcb.z + a33*ddc.z + a13*dab.z;
float ffcx = sx2 - ffdx;
float ffcy = sy2 - ffdy;
float ffcz = sz2 - ffdz;
// and calculate the virial (upper triangular version)
float improper_virial[6];
improper_virial[0] = float(1./4.)*(dab.x*ffax + dcb.x*ffcx + (ddc.x+dcb.x)*ffdx);
improper_virial[1] = float(1./4.)*(dab.y*ffax + dcb.y*ffcx + (ddc.y+dcb.y)*ffdx);
improper_virial[2] = float(1./4.)*(dab.z*ffax + dcb.z*ffcx + (ddc.z+dcb.z)*ffdx);
improper_virial[3] = float(1./4.)*(dab.y*ffay + dcb.y*ffcy + (ddc.y+dcb.y)*ffdy);
improper_virial[4] = float(1./4.)*(dab.z*ffay + dcb.z*ffcy + (ddc.z+dcb.z)*ffdy);
improper_virial[5] = float(1./4.)*(dab.z*ffaz + dcb.z*ffcz + (ddc.z+dcb.z)*ffdz);
if (cur_improper_abcd == 0)
{
force_idx.x += ffax;
force_idx.y += ffay;
force_idx.z += ffaz;
}
if (cur_improper_abcd == 1)
{
force_idx.x += ffbx;
force_idx.y += ffby;
force_idx.z += ffbz;
}
if (cur_improper_abcd == 2)
{
force_idx.x += ffcx;
force_idx.y += ffcy;
force_idx.z += ffcz;
}
if (cur_improper_abcd == 3)
{
force_idx.x += ffdx;
force_idx.y += ffdy;
force_idx.z += ffdz;
}
force_idx.w += improper_eng;
for (int k = 0; k < 6; k++)
virial_idx[k] += improper_virial[k];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int k = 0; k < 6; k++)
d_virial[k*virial_pitch+idx] = virial_idx[k];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos particle positions on the device
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param tlist Dihedral data to use in calculating the forces
\param dihedral_ABCD List of relative atom positions in the dihedrals
\param pitch Pitch of 2D dihedral list
\param n_dihedrals_list List of numbers of dihedrals per atom
\param d_params K, chi params packed as float2 variables (one element per improper type)
\param n_improper_types Number of improper types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
\a d_params should include one float2 element per improper type. The x component contains K the spring constant
and the y component contains chi, the equilibrium improper angle.
*/
hipError_t gpu_compute_harmonic_improper_forces(float4* d_force,
float* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const uint4 *tlist,
const uint1 *dihedral_ABCD,
const unsigned int pitch,
const unsigned int *n_dihedrals_list,
float2 *d_params,
unsigned int n_improper_types,
int block_size)
{
assert(d_params);
// setup the grid to run the kernel
dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// bind the texture
hipError_t error = hipBindTexture(0, improper_params_tex, d_params, sizeof(float2) * n_improper_types);
if (error != hipSuccess)
return error;
// run the kernel
hipLaunchKernelGGL(( gpu_compute_harmonic_improper_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, box, tlist, dihedral_ABCD, pitch, n_dihedrals_list);
return hipSuccess;
}
|
cbd94a1a9726935e3cfc5ded89f628cfda86fb74.cu
|
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: dnlebard
#include "HarmonicImproperForceGPU.cuh"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
// SMALL a relatively small number
#define SMALL 0.001f
/*! \file HarmonicImproperForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic improper forces. Used by HarmonicImproperForceComputeGPU.
*/
//! Texture for reading improper parameters
texture<float2, 1, cudaReadModeElementType> improper_params_tex;
//! Kernel for calculating harmonic improper forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial
\param N number of particles
\param d_pos Device memory of particle positions
\param box Box dimensions for periodic boundary condition handling
\param tlist Improper data to use in calculating the forces
\param dihedral_ABCD List of relative atom positions in the dihedrals
\param pitch Pitch of 2D dihedral list
\param n_dihedrals_list List of numbers of dihedrals per atom
*/
extern "C" __global__
void gpu_compute_harmonic_improper_forces_kernel(float4* d_force,
float* d_virial,
const unsigned int virial_pitch,
unsigned int N,
const Scalar4 *d_pos,
BoxDim box,
const uint4 *tlist,
const uint1 *dihedral_ABCD,
const unsigned int pitch,
const unsigned int *n_dihedrals_list)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_impropers = n_dihedrals_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
float4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c-d quartet
float3 idx_pos = make_float3(idx_postype.x, idx_postype.y, idx_postype.z);
float3 pos_a,pos_b,pos_c, pos_d; // allocate space for the a,b, and c atoms in the a-b-c-d quartet
// initialize the force to 0
float4 force_idx = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// initialize the virial to 0
float virial_idx[6];
for (int i = 0; i < 6; i++)
virial_idx[i] = 0.0f;
// loop over all impropers
for (int improper_idx = 0; improper_idx < n_impropers; improper_idx++)
{
uint4 cur_improper = tlist[pitch*improper_idx + idx];
uint1 cur_ABCD = dihedral_ABCD[pitch*improper_idx + idx];
int cur_improper_x_idx = cur_improper.x;
int cur_improper_y_idx = cur_improper.y;
int cur_improper_z_idx = cur_improper.z;
int cur_improper_type = cur_improper.w;
int cur_improper_abcd = cur_ABCD.x;
// get the a-particle's position (MEM TRANSFER: 16 bytes)
float4 x_postype = d_pos[cur_improper_x_idx];
float3 x_pos = make_float3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
float4 y_postype = d_pos[cur_improper_y_idx];
float3 y_pos = make_float3(y_postype.x, y_postype.y, y_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
float4 z_postype = d_pos[cur_improper_z_idx];
float3 z_pos = make_float3(z_postype.x, z_postype.y, z_postype.z);
if (cur_improper_abcd == 0)
{
pos_a = idx_pos;
pos_b = x_pos;
pos_c = y_pos;
pos_d = z_pos;
}
if (cur_improper_abcd == 1)
{
pos_b = idx_pos;
pos_a = x_pos;
pos_c = y_pos;
pos_d = z_pos;
}
if (cur_improper_abcd == 2)
{
pos_c = idx_pos;
pos_a = x_pos;
pos_b = y_pos;
pos_d = z_pos;
}
if (cur_improper_abcd == 3)
{
pos_d = idx_pos;
pos_a = x_pos;
pos_b = y_pos;
pos_c = z_pos;
}
// calculate dr for a-b,c-b,and a-c
float3 dab = pos_a - pos_b;
float3 dcb = pos_c - pos_b;
float3 ddc = pos_d - pos_c;
dab = box.minImage(dab);
dcb = box.minImage(dcb);
ddc = box.minImage(ddc);
// get the improper parameters (MEM TRANSFER: 12 bytes)
float2 params = tex1Dfetch(improper_params_tex, cur_improper_type);
float K = params.x;
float chi = params.y;
float r1 = rsqrtf(dot(dab, dab));
float r2 = rsqrtf(dot(dcb, dcb));
float r3 = rsqrtf(dot(ddc, ddc));
float ss1 = r1 * r1;
float ss2 = r2 * r2;
float ss3 = r3 * r3;
// Cosine and Sin of the angle between the planes
float c0 = dot(dab, ddc) * r1 * r3;
float c1 = dot(dab, dcb) * r1 * r2;
float c2 = -dot(ddc, dcb) * r3 * r2;
float s1 = 1.0f - c1*c1;
if (s1 < SMALL) s1 = SMALL;
s1 = 1.0f / s1;
float s2 = 1.0f - c2*c2;
if (s2 < SMALL) s2 = SMALL;
s2 = 1.0f / s2;
float s12 = sqrtf(s1*s2);
float c = (c1*c2 + c0) * s12;
if (c > 1.0f) c = 1.0f;
if (c < -1.0f) c = -1.0f;
float s = sqrtf(1.0f - c*c);
if (s < SMALL) s = SMALL;
float domega = acosf(c) - chi;
float a = K * domega;
// calculate the energy, 1/4th for each atom
//float improper_eng = 0.25*a*domega;
float improper_eng = 0.125f*a*domega; // the .125 term is 1/2 * 1/4
//a = -a * 2.0/s;
a = -a /s; // the missing 2.0 factor is to ensure K/2 is factored in for the forces
c = c * a;
s12 = s12 * a;
float a11 = c*ss1*s1;
float a22 = -ss2 * (2.0f*c0*s12 - c*(s1+s2));
float a33 = c*ss3*s2;
float a12 = -r1*r2*(c1*c*s1 + c2*s12);
float a13 = -r1*r3*s12;
float a23 = r2*r3*(c2*c*s2 + c1*s12);
float sx2 = a22*dcb.x + a23*ddc.x + a12*dab.x;
float sy2 = a22*dcb.y + a23*ddc.y + a12*dab.y;
float sz2 = a22*dcb.z + a23*ddc.z + a12*dab.z;
// calculate the forces for each particle
float ffax = a12*dcb.x + a13*ddc.x + a11*dab.x;
float ffay = a12*dcb.y + a13*ddc.y + a11*dab.y;
float ffaz = a12*dcb.z + a13*ddc.z + a11*dab.z;
float ffbx = -sx2 - ffax;
float ffby = -sy2 - ffay;
float ffbz = -sz2 - ffaz;
float ffdx = a23*dcb.x + a33*ddc.x + a13*dab.x;
float ffdy = a23*dcb.y + a33*ddc.y + a13*dab.y;
float ffdz = a23*dcb.z + a33*ddc.z + a13*dab.z;
float ffcx = sx2 - ffdx;
float ffcy = sy2 - ffdy;
float ffcz = sz2 - ffdz;
// and calculate the virial (upper triangular version)
float improper_virial[6];
improper_virial[0] = float(1./4.)*(dab.x*ffax + dcb.x*ffcx + (ddc.x+dcb.x)*ffdx);
improper_virial[1] = float(1./4.)*(dab.y*ffax + dcb.y*ffcx + (ddc.y+dcb.y)*ffdx);
improper_virial[2] = float(1./4.)*(dab.z*ffax + dcb.z*ffcx + (ddc.z+dcb.z)*ffdx);
improper_virial[3] = float(1./4.)*(dab.y*ffay + dcb.y*ffcy + (ddc.y+dcb.y)*ffdy);
improper_virial[4] = float(1./4.)*(dab.z*ffay + dcb.z*ffcy + (ddc.z+dcb.z)*ffdy);
improper_virial[5] = float(1./4.)*(dab.z*ffaz + dcb.z*ffcz + (ddc.z+dcb.z)*ffdz);
if (cur_improper_abcd == 0)
{
force_idx.x += ffax;
force_idx.y += ffay;
force_idx.z += ffaz;
}
if (cur_improper_abcd == 1)
{
force_idx.x += ffbx;
force_idx.y += ffby;
force_idx.z += ffbz;
}
if (cur_improper_abcd == 2)
{
force_idx.x += ffcx;
force_idx.y += ffcy;
force_idx.z += ffcz;
}
if (cur_improper_abcd == 3)
{
force_idx.x += ffdx;
force_idx.y += ffdy;
force_idx.z += ffdz;
}
force_idx.w += improper_eng;
for (int k = 0; k < 6; k++)
virial_idx[k] += improper_virial[k];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int k = 0; k < 6; k++)
d_virial[k*virial_pitch+idx] = virial_idx[k];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos particle positions on the device
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param tlist Dihedral data to use in calculating the forces
\param dihedral_ABCD List of relative atom positions in the dihedrals
\param pitch Pitch of 2D dihedral list
\param n_dihedrals_list List of numbers of dihedrals per atom
\param d_params K, chi params packed as float2 variables (one element per improper type)
\param n_improper_types Number of improper types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
\a d_params should include one float2 element per improper type. The x component contains K the spring constant
and the y component contains chi, the equilibrium improper angle.
*/
cudaError_t gpu_compute_harmonic_improper_forces(float4* d_force,
float* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const uint4 *tlist,
const uint1 *dihedral_ABCD,
const unsigned int pitch,
const unsigned int *n_dihedrals_list,
float2 *d_params,
unsigned int n_improper_types,
int block_size)
{
assert(d_params);
// setup the grid to run the kernel
dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// bind the texture
cudaError_t error = cudaBindTexture(0, improper_params_tex, d_params, sizeof(float2) * n_improper_types);
if (error != cudaSuccess)
return error;
// run the kernel
gpu_compute_harmonic_improper_forces_kernel<<< grid, threads>>>(d_force, d_virial, virial_pitch, N, d_pos, box, tlist, dihedral_ABCD, pitch, n_dihedrals_list);
return cudaSuccess;
}
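// A minimal host-side sketch (not part of HOOMD-blue) of how d_params could be packed
// for gpu_compute_harmonic_improper_forces(): one float2 per improper type with
// x = K (spring constant) and y = chi (equilibrium improper angle). The helper name
// pack_improper_params and the plain cudaMalloc/cudaMemcpy usage are illustrative
// assumptions, not the library's own API.
static float2* pack_improper_params(const float* K, const float* chi, unsigned int n_improper_types)
    {
    float2* h_params = new float2[n_improper_types];
    for (unsigned int i = 0; i < n_improper_types; i++)
        h_params[i] = make_float2(K[i], chi[i]);

    float2* d_params = NULL;
    cudaMalloc((void**)&d_params, sizeof(float2) * n_improper_types);
    cudaMemcpy(d_params, h_params, sizeof(float2) * n_improper_types, cudaMemcpyHostToDevice);
    delete[] h_params;
    return d_params; // hand to gpu_compute_harmonic_improper_forces(..., d_params, n_improper_types, block_size)
    }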
|
4ff0f706f8ecc7f2829d33a3f55c193f9c8766f5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <chrono>
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "CUDA Error: %s: %s.\n", msg, hipGetErrorString(err) );
exit(EXIT_FAILURE);
}
}
#define BLOCKSIZE 1024
__device__ void warpReduce(volatile unsigned int* sdata, int tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
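// Note: this warp-synchronous pattern relies on "volatile" and implicit lockstep
// execution inside a warp; on Volta and later GPUs (independent thread scheduling)
// it should be expressed with __syncwarp() or cooperative groups instead.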
__global__ void reduce(unsigned int* dVec, unsigned int* dAux, size_t N)
{
__shared__ unsigned int sdata[BLOCKSIZE];
size_t tid = threadIdx.x;
size_t i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = dVec[i] + dVec[i+blockDim.x];
__syncthreads();
for (unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
if (tid < 32) warpReduce(sdata, tid);
if (tid == 0) dAux[blockIdx.x] = sdata[0];
}
int main(int argc, char** argv)
{
unsigned int *vec;
unsigned int *dVec, *dAux;
size_t N0 = 32768;
size_t N = N0*N0;
vec = (unsigned int*) malloc (sizeof(unsigned int)*N);
for (size_t i = 0; i < N; i++) vec[i] = i;
hipMalloc(&dVec, sizeof(unsigned int)*N); checkCUDAError("Error allocating dVec");
hipMalloc(&dAux, sizeof(unsigned int)*N); checkCUDAError("Error allocating dAux");
hipMemcpy(dVec, vec, sizeof(unsigned int)*N, hipMemcpyHostToDevice); checkCUDAError("Error copying vec");
auto startTime = std::chrono::system_clock::now();
for (size_t n = N; n > 1; )
{
size_t bSize = BLOCKSIZE;
// Each block sums 2*bSize inputs, so one pass shrinks the element count by 2*BLOCKSIZE
// (this assumes n stays a power of two and the final pass still has at least 64 threads
// per block, which holds for N = 32768*32768 and BLOCKSIZE = 1024).
size_t gSize = n / (2 * BLOCKSIZE);
if (gSize == 0) { gSize = 1; bSize = n / 2; }
printf("bSize: %lu - gSize: %lu\n", bSize, gSize);
hipLaunchKernelGGL(( reduce), dim3(gSize), dim3(bSize), 0, 0, dVec, dAux, n); checkCUDAError("Failed Kernel Launch");
unsigned int *tmp = dVec; dVec = dAux; dAux = tmp;
n = gSize;
}
hipDeviceSynchronize();
auto endTime = std::chrono::system_clock::now();
unsigned int result = 0;
hipMemcpy(&result, dVec, sizeof(unsigned int), hipMemcpyDeviceToHost); checkCUDAError("Error getting result");
printf("[GPU] Result: %u - Elapsed Time: %fs\n", result, std::chrono::duration<double>(endTime-startTime).count());
return 0;
}
|
4ff0f706f8ecc7f2829d33a3f55c193f9c8766f5.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "CUDA Error: %s: %s.\n", msg, cudaGetErrorString(err) );
exit(EXIT_FAILURE);
}
}
#define BLOCKSIZE 1024
__device__ void warpReduce(volatile unsigned int* sdata, int tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
__global__ void reduce(unsigned int* dVec, unsigned int* dAux, size_t N)
{
__shared__ unsigned int sdata[BLOCKSIZE];
size_t tid = threadIdx.x;
size_t i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = dVec[i] + dVec[i+blockDim.x];
__syncthreads();
for (unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (tid < s) sdata[tid] += sdata[tid + s];
__syncthreads();
}
if (tid < 32) warpReduce(sdata, tid);
if (tid == 0) dAux[blockIdx.x] = sdata[0];
}
int main(int argc, char** argv)
{
unsigned int *vec;
unsigned int *dVec, *dAux;
size_t N0 = 32768;
size_t N = N0*N0;
vec = (unsigned int*) malloc (sizeof(unsigned int)*N);
for (size_t i = 0; i < N; i++) vec[i] = i;
cudaMalloc(&dVec, sizeof(unsigned int)*N); checkCUDAError("Error allocating dVec");
cudaMalloc(&dAux, sizeof(unsigned int)*N); checkCUDAError("Error allocating dAux");
cudaMemcpy(dVec, vec, sizeof(unsigned int)*N, cudaMemcpyHostToDevice); checkCUDAError("Error copying vec");
auto startTime = std::chrono::system_clock::now();
for (size_t n = N; n > 1; )
{
size_t bSize = BLOCKSIZE;
// Each block sums 2*bSize inputs, so one pass shrinks the element count by 2*BLOCKSIZE
// (this assumes n stays a power of two and the final pass still has at least 64 threads
// per block, which holds for N = 32768*32768 and BLOCKSIZE = 1024).
size_t gSize = n / (2 * BLOCKSIZE);
if (gSize == 0) { gSize = 1; bSize = n / 2; }
printf("bSize: %lu - gSize: %lu\n", bSize, gSize);
reduce<<<gSize, bSize>>>(dVec, dAux, n); checkCUDAError("Failed Kernel Launch");
unsigned int *tmp = dVec; dVec = dAux; dAux = tmp;
n = gSize;
}
cudaDeviceSynchronize();
auto endTime = std::chrono::system_clock::now();
unsigned int result = 0;
cudaMemcpy(&result, dVec, sizeof(unsigned int), cudaMemcpyDeviceToHost); checkCUDAError("Error getting result");
printf("[GPU] Result: %u - Elapsed Time: %fs\n", result, std::chrono::duration<double>(endTime-startTime).count());
return 0;
}
|
0317c511cd1608685a30fa4f4db3281690740210.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "asvgf/asvgf.h"
#include "kernel/pt_common.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
// TEA = Tiny Encryption Algorithm.
// https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
inline __device__ void encryptTea(uint2& arg)
{
const uint32_t key[] = {
0xa341316c,
0xc8013ea4,
0xad90777d,
0x7e95761e,
};
uint32_t v0 = arg.x;
uint32_t v1 = arg.y;
uint32_t sum = 0;
uint32_t delta = 0x9e3779b9;
for (int i = 0; i < 16; i++) {
sum += delta;
v0 += ((v1 << 4) + key[0]) ^ (v1 + sum) ^ ((v1 >> 5) + key[1]);
v1 += ((v0 << 4) + key[2]) ^ (v0 + sum) ^ ((v0 >> 5) + key[3]);
}
arg.x = v0;
arg.y = v1;
}
inline __device__ bool testReprojectedDepth(float z1, float z2, float dz)
{
float diffZ = abs(z1 - z2);
return diffZ < 2.0 * (dz + 1e-3f);
}
#define AT_IS_INBOUND(x, a, b) (((a) <= (x)) && ((x) < (b)))
__global__ void doForwardProjection(
int4* gradientSample,
const float4* __restrict__ curAovNormalDepth,
const float4* __restrict__ prevAovNormalDepth,
float4* curAovTexclrMeshid,
const float4* __restrict__ prevAovTexclrMeshid,
int* curRngSeed,
const int* __restrict__ prevRngSeed,
int frame,
int width, int height,
int gradientTileSize,
float cameraDistance,
hipSurfaceObject_t motionDetphBuffer,
int* executedIdxArray)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
int idx = getIdx(ix, iy, width);
// Compute randomized position as previous position.
uint2 teaArg = make_uint2(idx, frame);
encryptTea(teaArg);
teaArg.x %= gradientTileSize;
teaArg.y %= gradientTileSize;
int2 prevPos = make_int2(
ix * gradientTileSize + teaArg.x,
iy * gradientTileSize + teaArg.y);
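    // (ix, iy) is treated as a gradient-tile coordinate: the TEA hash picks a
    // pseudo-random pixel inside that gradientTileSize x gradientTileSize tile of the
    // previous frame, giving one candidate gradient sample per tile per frame.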
float4 motionDepth;
surf2Dread(&motionDepth, motionDetphBuffer, prevPos.x * sizeof(float4), prevPos.y);
// NOTE
// motion = prev - cur
// => -motion = cur - prev
// => prev + (-motion) = prev + (cur - prev) = cur
int2 curPos = make_int2(prevPos.x - motionDepth.x, prevPos.y - motionDepth.y);
// Check if position is in screen.
if (!AT_IS_INBOUND(curPos.x, 0, width)
|| !AT_IS_INBOUND(curPos.y, 0, height))
{
return;
}
int curIdx = getIdx(curPos.x, curPos.y, width);
int prevIdx = getIdx(prevPos.x, prevPos.y, width);
float4 curNmlDepth = curAovNormalDepth[curIdx];
float4 prevNmlDepth = prevAovNormalDepth[prevIdx];
float pixelDistanceRatio = (curNmlDepth.w / cameraDistance) * height;
bool accept = testReprojectedDepth(curNmlDepth.w, prevNmlDepth.w, pixelDistanceRatio);
if (!accept) {
return;
}
// Remove depth.
curNmlDepth.w = prevNmlDepth.w = 0;
accept = (dot(curNmlDepth, prevNmlDepth) > 0.9f);
if (!accept) {
return;
}
int2 tilePos = make_int2(
curPos.x % gradientTileSize,
curPos.y % gradientTileSize);
// NOTE
// Atomic functions for CUDA.
// http://www.slis.tsukuba.ac.jp/~fujisawa.makoto.fu/cgi-bin/wiki/index.php?CUDA%A5%A2%A5%C8%A5%DF%A5%C3%A5%AF%B4%D8%BF%F4
int res = atomicCAS(&executedIdxArray[idx], -1, idx);
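    // executedIdxArray entries are expected to start at -1; the atomicCAS marks this
    // slot as claimed so the reprojected albedo/mesh id and RNG seed below are copied
    // at most once.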
if (res < 0) {
// NOTE
// w is not used.
int downSizedWidth = (width + gradientTileSize - 1) / gradientTileSize;
int downSizedIdx = getIdx(
curPos.x / gradientTileSize,
curPos.y / gradientTileSize,
downSizedWidth);
gradientSample[downSizedIdx] = make_int4(tilePos.x, tilePos.y, prevIdx, 0);
// Albedo and Mesh id.
curAovTexclrMeshid[curIdx] = prevAovTexclrMeshid[prevIdx];
// Rng seed later.
curRngSeed[curIdx] = prevRngSeed[prevIdx];
}
}
|
0317c511cd1608685a30fa4f4db3281690740210.cu
|
#include "asvgf/asvgf.h"
#include "kernel/pt_common.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
// TEA = Tiny Encryption Algorithm.
// https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
inline __device__ void encryptTea(uint2& arg)
{
const uint32_t key[] = {
0xa341316c,
0xc8013ea4,
0xad90777d,
0x7e95761e,
};
uint32_t v0 = arg.x;
uint32_t v1 = arg.y;
uint32_t sum = 0;
uint32_t delta = 0x9e3779b9;
for (int i = 0; i < 16; i++) {
sum += delta;
v0 += ((v1 << 4) + key[0]) ^ (v1 + sum) ^ ((v1 >> 5) + key[1]);
v1 += ((v0 << 4) + key[2]) ^ (v0 + sum) ^ ((v0 >> 5) + key[3]);
}
arg.x = v0;
arg.y = v1;
}
inline __device__ bool testReprojectedDepth(float z1, float z2, float dz)
{
float diffZ = abs(z1 - z2);
return diffZ < 2.0 * (dz + 1e-3f);
}
#define AT_IS_INBOUND(x, a, b) (((a) <= (x)) && ((x) < (b)))
__global__ void doForwardProjection(
int4* gradientSample,
const float4* __restrict__ curAovNormalDepth,
const float4* __restrict__ prevAovNormalDepth,
float4* curAovTexclrMeshid,
const float4* __restrict__ prevAovTexclrMeshid,
int* curRngSeed,
const int* __restrict__ prevRngSeed,
int frame,
int width, int height,
int gradientTileSize,
float cameraDistance,
cudaSurfaceObject_t motionDetphBuffer,
int* executedIdxArray)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
int idx = getIdx(ix, iy, width);
// Compute randomized position as previous position.
uint2 teaArg = make_uint2(idx, frame);
encryptTea(teaArg);
teaArg.x %= gradientTileSize;
teaArg.y %= gradientTileSize;
int2 prevPos = make_int2(
ix * gradientTileSize + teaArg.x,
iy * gradientTileSize + teaArg.y);
float4 motionDepth;
surf2Dread(&motionDepth, motionDetphBuffer, prevPos.x * sizeof(float4), prevPos.y);
// NOTE
// motion = prev - cur
// => -motion = cur - prev
// => prev + (-motion) = prev + (cur - prev) = cur
int2 curPos = make_int2(prevPos.x - motionDepth.x, prevPos.y - motionDepth.y);
// Check if position is in screen.
if (!AT_IS_INBOUND(curPos.x, 0, width)
|| !AT_IS_INBOUND(curPos.y, 0, height))
{
return;
}
int curIdx = getIdx(curPos.x, curPos.y, width);
int prevIdx = getIdx(prevPos.x, prevPos.y, width);
float4 curNmlDepth = curAovNormalDepth[curIdx];
float4 prevNmlDepth = prevAovNormalDepth[prevIdx];
float pixelDistanceRatio = (curNmlDepth.w / cameraDistance) * height;
bool accept = testReprojectedDepth(curNmlDepth.w, prevNmlDepth.w, pixelDistanceRatio);
if (!accept) {
return;
}
// Remove depth.
curNmlDepth.w = prevNmlDepth.w = 0;
accept = (dot(curNmlDepth, prevNmlDepth) > 0.9f);
if (!accept) {
return;
}
int2 tilePos = make_int2(
curPos.x % gradientTileSize,
curPos.y % gradientTileSize);
// NOTE
// Atomic functions for CUDA.
// http://www.slis.tsukuba.ac.jp/~fujisawa.makoto.fu/cgi-bin/wiki/index.php?CUDA%A5%A2%A5%C8%A5%DF%A5%C3%A5%AF%B4%D8%BF%F4
int res = atomicCAS(&executedIdxArray[idx], -1, idx);
if (res < 0) {
// NOTE
// w is not used.
int downSizedWidth = (width + gradientTileSize - 1) / gradientTileSize;
int downSizedIdx = getIdx(
curPos.x / gradientTileSize,
curPos.y / gradientTileSize,
downSizedWidth);
gradientSample[downSizedIdx] = make_int4(tilePos.x, tilePos.y, prevIdx, 0);
// Albedo and Mesh id.
curAovTexclrMeshid[curIdx] = prevAovTexclrMeshid[prevIdx];
// Rng seed later.
curRngSeed[curIdx] = prevRngSeed[prevIdx];
}
}
|
91b8f185d0651bc326bcc0901db11881377ea109.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/cuda_helpers.c"
extern "C"{
#include "reduction_helpers.c"
}
__global__ void
reduce(double *input, double *output,int numElements)
{
const int tid = threadIdx.x;
const int idx = tid + blockIdx.x * blockDim.x;
double *offset = input + blockIdx.x * blockDim.x;
if(idx >= numElements) return;
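// In-place tree reduction over this block's contiguous blockDim.x-element segment of
// global memory: after the loop the block's partial sum sits at offset[0] and thread 0
// publishes it to output[blockIdx.x].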
for(int stride = 1; stride < blockDim.x; stride*=2){
int index = 2*tid*stride;
int neighbor = index + stride;
if(neighbor < blockDim.x){
offset[index] += offset[neighbor];
}
__syncthreads();
}
if(tid==0){output[blockIdx.x] = offset[0];}
}
int
main(void)
{
unsigned const int num_elements = 1<<13;
const size_t size = num_elements*sizeof(double);
double *orig_input;
double *h_input;
double *h_output;
double *d_input;
double *d_output;
initialize_host(size,&h_input,&h_output);
for(int i = 0; i < num_elements;i++){
h_input[i] = (double)i;
h_output[i] = 0;
}
initialize_device(size,&d_input,&d_output);
copy_host_to_device(size,h_input,h_output,d_input,d_output);
orig_input = (double *)malloc(size);
memcpy(orig_input,h_input,size);
int threadsPerBlock = DIM;
int blocksPerGrid =(num_elements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( reduce), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_input,d_output, num_elements);
copy_device_to_host(size,h_input,h_output,d_input,d_output);
check_reduction(orig_input,h_output,num_elements,blocksPerGrid);
printf("DESTROYING\n");
destroy_host(h_input,h_output);
free(orig_input);
}
|
91b8f185d0651bc326bcc0901db11881377ea109.cu
|
#include "../common/cuda_helpers.c"
extern "C"{
#include "reduction_helpers.c"
}
__global__ void
reduce(double *input, double *output,int numElements)
{
const int tid = threadIdx.x;
const int idx = tid + blockIdx.x * blockDim.x;
double *offset = input + blockIdx.x * blockDim.x;
if(idx >= numElements) return;
for(int stride = 1; stride < blockDim.x; stride*=2){
int index = 2*tid*stride;
int neighbor = index + stride;
if(neighbor < blockDim.x){
offset[index] += offset[neighbor];
}
__syncthreads();
}
if(tid==0){output[blockIdx.x] = offset[0];}
}
int
main(void)
{
unsigned const int num_elements = 1<<13;
const size_t size = num_elements*sizeof(double);
double *orig_input;
double *h_input;
double *h_output;
double *d_input;
double *d_output;
initialize_host(size,&h_input,&h_output);
for(int i = 0; i < num_elements;i++){
h_input[i] = (double)i;
h_output[i] = 0;
}
initialize_device(size,&d_input,&d_output);
copy_host_to_device(size,h_input,h_output,d_input,d_output);
orig_input = (double *)malloc(size);
memcpy(orig_input,h_input,size);
int threadsPerBlock = DIM;
int blocksPerGrid =(num_elements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
reduce<<<blocksPerGrid, threadsPerBlock>>>(d_input,d_output, num_elements);
copy_device_to_host(size,h_input,h_output,d_input,d_output);
check_reduction(orig_input,h_output,num_elements,blocksPerGrid);
printf("DESTROYING\n");
destroy_host(h_input,h_output);
free(orig_input);
}
|
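The pair above implements an interleaved-pair reduction directly on global memory, with an early return before the __syncthreads() inside the loop. For comparison, here is a sketch of the more common shared-memory, sequential-addressing variant; it assumes threadsPerBlock is a power of two and is an editorial illustration, not the assignment's reference solution.

// Shared-memory block reduction with sequential addressing. Every thread
// reaches __syncthreads(), and out-of-range elements are padded with 0
// instead of returning early.
__global__ void reduceSeq(const double* input, double* output, int numElements)
{
    extern __shared__ double sdata[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tid;

    sdata[tid] = (idx < numElements) ? input[idx] : 0.0;
    __syncthreads();

    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) {
            sdata[tid] += sdata[tid + stride];
        }
        __syncthreads();
    }
    if (tid == 0) { output[blockIdx.x] = sdata[0]; }
}
// Launch (dynamic shared memory sized to the block):
//   reduceSeq<<<blocksPerGrid, threadsPerBlock, threadsPerBlock * sizeof(double)>>>(d_input, d_output, n);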
7819ea5d0171190049d79612a430898ea030089a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_list_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <thrust/sequence.h>
#include <random>
#include "rmm/cuda_stream_view.hpp"
template <typename T>
struct TypedScalarDeviceViewTest : public cudf::test::BaseFixture {
};
TYPED_TEST_SUITE(TypedScalarDeviceViewTest, cudf::test::FixedWidthTypesWithoutFixedPoint);
template <typename ScalarDeviceViewType>
__global__ void test_set_value(ScalarDeviceViewType s, ScalarDeviceViewType s1)
{
s1.set_value(s.value());
s1.set_valid(true);
}
template <typename ScalarDeviceViewType>
__global__ void test_value(ScalarDeviceViewType s, ScalarDeviceViewType s1, bool* result)
{
*result = (s.value() == s1.value());
}
TYPED_TEST(TypedScalarDeviceViewTest, Value)
{
TypeParam value = cudf::test::make_type_param_scalar<TypeParam>(7);
TypeParam value1 = cudf::test::make_type_param_scalar<TypeParam>(11);
cudf::scalar_type_t<TypeParam> s(value);
cudf::scalar_type_t<TypeParam> s1{value1};
auto scalar_device_view = cudf::get_scalar_device_view(s);
auto scalar_device_view1 = cudf::get_scalar_device_view(s1);
rmm::device_scalar<bool> result{rmm::cuda_stream_default};
hipLaunchKernelGGL(( test_set_value), dim3(1), dim3(1), 0, 0, scalar_device_view, scalar_device_view1);
CHECK_CUDA(0);
EXPECT_EQ(s1.value(), value);
EXPECT_TRUE(s1.is_valid());
hipLaunchKernelGGL(( test_value), dim3(1), dim3(1), 0, 0, scalar_device_view, scalar_device_view1, result.data());
CHECK_CUDA(0);
EXPECT_TRUE(result.value(rmm::cuda_stream_default));
}
template <typename ScalarDeviceViewType>
__global__ void test_null(ScalarDeviceViewType s, bool* result)
{
*result = s.is_valid();
}
TYPED_TEST(TypedScalarDeviceViewTest, ConstructNull)
{
TypeParam value = cudf::test::make_type_param_scalar<TypeParam>(5);
cudf::scalar_type_t<TypeParam> s(value, false);
auto scalar_device_view = cudf::get_scalar_device_view(s);
rmm::device_scalar<bool> result{rmm::cuda_stream_default};
hipLaunchKernelGGL(( test_null), dim3(1), dim3(1), 0, 0, scalar_device_view, result.data());
CHECK_CUDA(0);
EXPECT_FALSE(result.value(rmm::cuda_stream_default));
}
template <typename ScalarDeviceViewType>
__global__ void test_setnull(ScalarDeviceViewType s)
{
s.set_valid(false);
}
TYPED_TEST(TypedScalarDeviceViewTest, SetNull)
{
TypeParam value = cudf::test::make_type_param_scalar<TypeParam>(5);
cudf::scalar_type_t<TypeParam> s{value};
auto scalar_device_view = cudf::get_scalar_device_view(s);
s.set_valid_async(true);
EXPECT_TRUE(s.is_valid());
hipLaunchKernelGGL(( test_setnull), dim3(1), dim3(1), 0, 0, scalar_device_view);
CHECK_CUDA(0);
EXPECT_FALSE(s.is_valid());
}
struct StringScalarDeviceViewTest : public cudf::test::BaseFixture {
};
__global__ void test_string_value(cudf::string_scalar_device_view s,
const char* value,
cudf::size_type size,
bool* result)
{
*result = (s.value() == cudf::string_view(value, size));
}
TEST_F(StringScalarDeviceViewTest, Value)
{
std::string value("test string");
cudf::string_scalar s(value);
auto scalar_device_view = cudf::get_scalar_device_view(s);
rmm::device_scalar<bool> result{rmm::cuda_stream_default};
auto value_v = cudf::detail::make_device_uvector_sync(value);
hipLaunchKernelGGL(( test_string_value), dim3(1), dim3(1), 0, 0, scalar_device_view, value_v.data(), value.size(), result.data());
CHECK_CUDA(0);
EXPECT_TRUE(result.value(rmm::cuda_stream_default));
}
|
7819ea5d0171190049d79612a430898ea030089a.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_list_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <thrust/sequence.h>
#include <random>
#include "rmm/cuda_stream_view.hpp"
template <typename T>
struct TypedScalarDeviceViewTest : public cudf::test::BaseFixture {
};
TYPED_TEST_SUITE(TypedScalarDeviceViewTest, cudf::test::FixedWidthTypesWithoutFixedPoint);
template <typename ScalarDeviceViewType>
__global__ void test_set_value(ScalarDeviceViewType s, ScalarDeviceViewType s1)
{
s1.set_value(s.value());
s1.set_valid(true);
}
template <typename ScalarDeviceViewType>
__global__ void test_value(ScalarDeviceViewType s, ScalarDeviceViewType s1, bool* result)
{
*result = (s.value() == s1.value());
}
TYPED_TEST(TypedScalarDeviceViewTest, Value)
{
TypeParam value = cudf::test::make_type_param_scalar<TypeParam>(7);
TypeParam value1 = cudf::test::make_type_param_scalar<TypeParam>(11);
cudf::scalar_type_t<TypeParam> s(value);
cudf::scalar_type_t<TypeParam> s1{value1};
auto scalar_device_view = cudf::get_scalar_device_view(s);
auto scalar_device_view1 = cudf::get_scalar_device_view(s1);
rmm::device_scalar<bool> result{rmm::cuda_stream_default};
test_set_value<<<1, 1>>>(scalar_device_view, scalar_device_view1);
CHECK_CUDA(0);
EXPECT_EQ(s1.value(), value);
EXPECT_TRUE(s1.is_valid());
test_value<<<1, 1>>>(scalar_device_view, scalar_device_view1, result.data());
CHECK_CUDA(0);
EXPECT_TRUE(result.value(rmm::cuda_stream_default));
}
template <typename ScalarDeviceViewType>
__global__ void test_null(ScalarDeviceViewType s, bool* result)
{
*result = s.is_valid();
}
TYPED_TEST(TypedScalarDeviceViewTest, ConstructNull)
{
TypeParam value = cudf::test::make_type_param_scalar<TypeParam>(5);
cudf::scalar_type_t<TypeParam> s(value, false);
auto scalar_device_view = cudf::get_scalar_device_view(s);
rmm::device_scalar<bool> result{rmm::cuda_stream_default};
test_null<<<1, 1>>>(scalar_device_view, result.data());
CHECK_CUDA(0);
EXPECT_FALSE(result.value(rmm::cuda_stream_default));
}
template <typename ScalarDeviceViewType>
__global__ void test_setnull(ScalarDeviceViewType s)
{
s.set_valid(false);
}
TYPED_TEST(TypedScalarDeviceViewTest, SetNull)
{
TypeParam value = cudf::test::make_type_param_scalar<TypeParam>(5);
cudf::scalar_type_t<TypeParam> s{value};
auto scalar_device_view = cudf::get_scalar_device_view(s);
s.set_valid_async(true);
EXPECT_TRUE(s.is_valid());
test_setnull<<<1, 1>>>(scalar_device_view);
CHECK_CUDA(0);
EXPECT_FALSE(s.is_valid());
}
struct StringScalarDeviceViewTest : public cudf::test::BaseFixture {
};
__global__ void test_string_value(cudf::string_scalar_device_view s,
const char* value,
cudf::size_type size,
bool* result)
{
*result = (s.value() == cudf::string_view(value, size));
}
TEST_F(StringScalarDeviceViewTest, Value)
{
std::string value("test string");
cudf::string_scalar s(value);
auto scalar_device_view = cudf::get_scalar_device_view(s);
rmm::device_scalar<bool> result{rmm::cuda_stream_default};
auto value_v = cudf::detail::make_device_uvector_sync(value);
test_string_value<<<1, 1>>>(scalar_device_view, value_v.data(), value.size(), result.data());
CHECK_CUDA(0);
EXPECT_TRUE(result.value(rmm::cuda_stream_default));
}
|
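The cudf test above follows a simple pattern: launch a one-thread kernel that writes a bool verdict into device memory (rmm::device_scalar<bool> plays that role), then read it back on the host. A stripped-down sketch of the same pattern in plain CUDA, with hypothetical names, looks like this:

#include <cstdio>
#include <cuda_runtime.h>

// One-thread kernel that records a comparison result in device memory.
__global__ void checkEqual(int a, int b, bool* result) { *result = (a == b); }

int main()
{
    bool* d_result = nullptr;
    cudaMalloc((void**)&d_result, sizeof(bool));

    checkEqual<<<1, 1>>>(7, 7, d_result);

    bool h_result = false;
    cudaMemcpy(&h_result, d_result, sizeof(bool), cudaMemcpyDeviceToHost);
    printf("equal: %s\n", h_result ? "true" : "false");

    cudaFree(d_result);
    return 0;
}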
ca6fb421624e72e6847482323a8609a7c4e72115.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute the overall index from the position of the thread in the current block
// and the index of the block we are in
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < N; index += blockDim.x*gridDim.x){
result[index] = alpha * x[index] + y[index];
}
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 512; // change this if necessary
float *device_x;
float *device_y;
float *device_result;
//
// TODO: do we need to allocate device memory buffers on the GPU here?
//
hipMallocManaged(&device_x, total_elems*sizeof(float));
hipMallocManaged(&device_y, total_elems*sizeof(float));
hipMallocManaged(&device_result, total_elems*sizeof(float));
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//
// TODO: do we need copy here?
//
hipMemcpy(device_x, xarray, total_elems*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_y, yarray, total_elems*sizeof(float), hipMemcpyHostToDevice);
//
// TODO: insert timer here to begin timing only the kernel
//
double startGPUTime = CycleTimer::currentSeconds();
// compute number of blocks and threads per block
int blocksPerGrid = (total_elems + threadsPerBlock - 1) / threadsPerBlock;
// run saxpy_kernel on the GPU
hipLaunchKernelGGL(( saxpy_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, total_elems, alpha, device_x, device_y, device_result);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call hipDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
hipDeviceSynchronize();
double endGPUTime = CycleTimer::currentSeconds();
double timeKernel = endGPUTime - startGPUTime;
timeKernelAvg += timeKernel;
hipError_t errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
//
// TODO: copy result from GPU using hipMemcpy
//
hipMemcpy(resultarray, device_result, total_elems*sizeof(float), hipMemcpyDeviceToHost);
// What would be copy time when we use UVM?
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
totalTimeAvg += overallDuration;
//
// TODO free device memory if you allocate some device memory earlier in this function.
//
hipFree(device_x);
hipFree(device_y);
hipFree(device_result);
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
ca6fb421624e72e6847482323a8609a7c4e72115.cu
|
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
__global__ void
saxpy_kernel(int N, float alpha, float* x, float* y, float* result) {
// compute the overall index from the position of the thread in the current block
// and the index of the block we are in
for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < N; index += blockDim.x*gridDim.x){
result[index] = alpha * x[index] + y[index];
}
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 512; // change this if necessary
float *device_x;
float *device_y;
float *device_result;
//
// TODO: do we need to allocate device memory buffers on the GPU here?
//
cudaMallocManaged(&device_x, total_elems*sizeof(float));
cudaMallocManaged(&device_y, total_elems*sizeof(float));
cudaMallocManaged(&device_result, total_elems*sizeof(float));
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
//
// TODO: do we need copy here?
//
cudaMemcpy(device_x, xarray, total_elems*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_y, yarray, total_elems*sizeof(float), cudaMemcpyHostToDevice);
//
// TODO: insert timer here to begin timing only the kernel
//
double startGPUTime = CycleTimer::currentSeconds();
// compute number of blocks and threads per block
int blocksPerGrid = (total_elems + threadsPerBlock - 1) / threadsPerBlock;
// run saxpy_kernel on the GPU
saxpy_kernel<<<blocksPerGrid, threadsPerBlock>>>(total_elems, alpha, device_x, device_y, device_result);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call cudaDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
cudaDeviceSynchronize();
double endGPUTime = CycleTimer::currentSeconds();
double timeKernel = endGPUTime - startGPUTime;
timeKernelAvg += timeKernel;
cudaError_t errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
//
// TODO: copy result from GPU using cudaMemcpy
//
cudaMemcpy(resultarray, device_result, total_elems*sizeof(float), cudaMemcpyDeviceToHost);
// What would be copy time when we use UVM?
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
totalTimeAvg += overallDuration;
//
// TODO free device memory if you allocate some device memory earlier in this function.
//
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_result);
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
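The saxpy pair above allocates with cudaMallocManaged/hipMallocManaged but still copies explicitly, which its own TODO comments question. Below is a sketch of the unified-memory variant those TODOs hint at; saxpyGridStride and saxpyManagedSketch are hypothetical names, and the prefetch calls are optional hints (useful on devices with concurrent managed access), not requirements.

#include <cuda_runtime.h>

// Grid-stride saxpy kernel, as in the file above but defined locally so the
// sketch is self-contained.
__global__ void saxpyGridStride(long n, float alpha, const float* x, const float* y, float* r)
{
    for (long i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += (long)blockDim.x * gridDim.x)
        r[i] = alpha * x[i] + y[i];
}

void saxpyManagedSketch(long n, float alpha, const float* xsrc, const float* ysrc, float* dst)
{
    float *x, *y, *r;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    cudaMallocManaged(&r, n * sizeof(float));

    // The host can write managed memory directly; no explicit cudaMemcpy needed.
    for (long i = 0; i < n; i++) { x[i] = xsrc[i]; y[i] = ysrc[i]; }

    int device = 0;
    cudaMemPrefetchAsync(x, n * sizeof(float), device, 0);  // optional migration hint
    cudaMemPrefetchAsync(y, n * sizeof(float), device, 0);

    int threads = 512;
    int blocks = (int)((n + threads - 1) / threads);
    saxpyGridStride<<<blocks, threads>>>(n, alpha, x, y, r);
    cudaDeviceSynchronize();

    // Reading r on the host migrates the pages back via page faults.
    for (long i = 0; i < n; i++) dst[i] = r[i];

    cudaFree(x); cudaFree(y); cudaFree(r);
}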
35e038a8b63ae52cf4efa0825c38bf2ec35506eb.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
PLR - Parallelized Linear Recurrences [float]
Copyright (c) 2018 Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted for academic, research, experimental, or personal use provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry Relations at Texas State University http://www.txstate.edu/ocir/.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Sepideh Maleki and Martin Burtscher
non-recursive coefficients: (0.280990)
recursive coefficients: (0.719010)
*** Simulating fwd + rev recursive filter ***
*/
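// Reference semantics (editorial sketch, not emitted by the generator): each
// forward pass below evaluates the first-order linear recurrence
//   y[0] = 0.280990f * x[0]
//   y[i] = 0.280990f * x[i] + 0.719010f * y[i-1]
// which a sequential CPU loop would compute as
//   float carry = 0.0f;
//   for (int i = 0; i < items; i++) {
//       carry = 0.280990f * input[i] + 0.719010f * carry;
//       output[i] = carry;
//   }
// The kernels parallelize this by scaling each input by the non-recursive
// coefficient and combining partial results with powers of the recursive
// coefficient (the facA table), carrying a single value between chunks.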
#include <cstdio>
#include <cassert>
#include <hip/hip_runtime.h>
typedef float T;
static const int device = 0;
static const int order = 1;
static const int warp_size = 32;
static const int block_size = 1024;
static __device__ const T facA[264] = {7.190100e-01f, 5.169754e-01f, 3.717105e-01f, 2.672636e-01f, 1.921652e-01f, 1.381687e-01f, 9.934466e-02f, 7.142980e-02f, 5.135874e-02f, 3.692745e-02f, 2.655121e-02f, 1.909058e-02f, 1.372632e-02f, 9.869360e-03f, 7.096169e-03f, 5.102216e-03f, 3.668545e-03f, 2.637720e-03f, 1.896547e-03f, 1.363636e-03f, 9.804681e-04f, 7.049664e-04f, 5.068778e-04f, 3.644502e-04f, 2.620434e-04f, 1.884118e-04f, 1.354700e-04f, 9.740427e-05f, 7.003464e-05f, 5.035561e-05f, 3.620618e-05f, 2.603261e-05f, 1.871771e-05f, 1.345822e-05f, 9.676593e-06f, 6.957568e-06f, 5.002561e-06f, 3.596891e-06f, 2.586201e-06f, 1.859504e-06f, 1.337002e-06f, 9.613179e-07f, 6.911972e-07f, 4.969777e-07f, 3.573319e-07f, 2.569252e-07f, 1.847318e-07f, 1.328240e-07f, 9.550178e-08f, 6.866674e-08f, 4.937207e-08f, 3.549901e-08f, 2.552414e-08f, 1.835211e-08f, 1.319535e-08f, 9.487591e-09f, 6.821673e-09f, 4.904851e-09f, 3.526637e-09f, 2.535687e-09f, 1.823184e-09f, 1.310888e-09f, 9.425415e-10f, 6.776967e-10f, 4.872707e-10f, 3.503525e-10f, 2.519070e-10f, 1.811236e-10f, 1.302297e-10f, 9.363646e-11f, 6.732555e-11f, 4.840774e-11f, 3.480565e-11f, 2.502561e-11f, 1.799367e-11f, 1.293763e-11f, 9.302282e-12f, 6.688434e-12f, 4.809051e-12f, 3.457756e-12f, 2.486161e-12f, 1.787574e-12f, 1.285284e-12f, 9.241320e-13f, 6.644601e-13f, 4.777535e-13f, 3.435095e-13f, 2.469868e-13f, 1.775859e-13f, 1.276861e-13f, 9.180756e-14f, 6.601056e-14f, 4.746225e-14f, 3.412583e-14f, 2.453681e-14f, 1.764222e-14f, 1.268493e-14f, 9.120591e-15f, 6.557796e-15f, 4.715121e-15f, 3.390219e-15f, 2.437601e-15f, 1.752660e-15f, 1.260180e-15f, 9.060818e-16f, 6.514819e-16f, 4.684220e-16f, 3.368001e-16f, 2.421626e-16f, 1.741173e-16f, 1.251921e-16f, 9.001438e-17f, 6.472124e-17f, 4.653522e-17f, 3.345928e-17f, 2.405756e-17f, 1.729763e-17f, 1.243717e-17f, 8.942447e-18f, 6.429709e-18f, 4.623025e-18f, 3.324001e-18f, 2.389990e-18f, 1.718427e-18f, 1.235566e-18f, 8.883843e-19f, 6.387572e-19f, 4.592728e-19f, 3.302217e-19f, 2.374327e-19f, 1.707165e-19f, 1.227469e-19f, 8.825622e-20f, 6.345711e-20f, 4.562630e-20f, 3.280576e-20f, 2.358767e-20f, 1.695977e-20f, 1.219425e-20f, 8.767785e-21f, 6.304125e-21f, 4.532729e-21f, 3.259077e-21f, 2.343309e-21f, 1.684863e-21f, 1.211433e-21f, 8.710326e-22f, 6.262811e-22f, 4.503024e-22f, 3.237719e-22f, 2.327953e-22f, 1.673821e-22f, 1.203494e-22f, 8.653243e-23f, 6.221768e-23f, 4.473513e-23f, 3.216501e-23f, 2.312696e-23f, 1.662852e-23f, 1.195607e-23f, 8.596534e-24f, 6.180994e-24f, 4.444197e-24f, 3.195422e-24f, 2.297540e-24f, 1.651954e-24f, 1.187772e-24f, 8.540197e-25f, 6.140487e-25f, 4.415072e-25f, 3.174481e-25f, 2.282483e-25f, 1.641128e-25f, 1.179988e-25f, 8.484229e-26f, 6.100245e-26f, 4.386138e-26f, 3.153677e-26f, 2.267525e-26f, 1.630373e-26f, 1.172255e-26f, 8.428628e-27f, 6.060268e-27f, 4.357393e-27f, 3.133009e-27f, 2.252665e-27f, 1.619689e-27f, 1.164572e-27f, 8.373393e-28f, 6.020553e-28f, 4.328838e-28f, 3.112478e-28f, 2.237902e-28f, 1.609074e-28f, 1.156940e-28f, 8.318518e-29f, 5.981097e-29f, 4.300469e-29f, 3.092080e-29f, 2.223236e-29f, 1.598529e-29f, 1.149359e-29f, 8.264003e-30f, 5.941901e-30f, 4.272286e-30f, 3.071816e-30f, 2.208667e-30f, 1.588053e-30f, 1.141826e-30f, 8.209844e-31f, 5.902960e-31f, 4.244287e-31f, 3.051685e-31f, 2.194192e-31f, 1.577646e-31f, 1.134343e-31f, 8.156041e-32f, 5.864275e-32f, 4.216472e-32f, 3.031686e-32f, 2.179812e-32f, 1.567307e-32f, 1.126909e-32f, 8.102591e-33f, 5.825844e-33f, 4.188840e-33f, 3.011818e-33f, 2.165527e-33f, 1.557036e-33f, 1.119524e-33f, 8.049491e-34f, 5.787664e-34f, 4.161388e-34f, 2.992080e-34f, 
2.151335e-34f, 1.546831e-34f, 1.112187e-34f, 7.996737e-35f, 5.749734e-35f, 4.134116e-35f, 2.972471e-35f, 2.137236e-35f, 1.536694e-35f, 1.104899e-35f, 7.944331e-36f, 5.712053e-36f, 4.107023e-36f, 2.952991e-36f, 2.123230e-36f, 1.526623e-36f, 1.097657e-36f, 7.892267e-37f, 5.674619e-37f, 4.080108e-37f, 2.933638e-37f, 2.109315e-37f, 1.516619e-37f, 1.090464e-37f, 7.840545e-38f, 5.637430e-38f, 4.053369e-38f, 2.914413e-38f, 2.095492e-38f, 1.506680e-38f};
// shared memory size is 5256 bytes
static __device__ unsigned int counter = 0;
static __global__ __launch_bounds__(block_size, 2)
void Recurrence1(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 1;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
}
val0 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
}
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val0;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
} else {
output[offs + (0 * block_size)] = val0;
}
}
static __global__ __launch_bounds__(block_size, 2)
void Recurrence2(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 2;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val1;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
}
}
static __global__ __launch_bounds__(block_size, 2)
void Recurrence3(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 3;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val2;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
}
}
static __global__ __launch_bounds__(block_size, 2)
void Recurrence4(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 4;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val3;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
}
}
static __global__ __launch_bounds__(block_size, 2)
void Recurrence5(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 5;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val4;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
}
}
static __global__ __launch_bounds__(block_size, 2)
void Recurrence6(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 6;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4, val5;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
val5 = 0;
if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
val5 = input[offs + (5 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
val5 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
spc = help * __shfl(val5, 0, 2);
if (cond) val5 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
spc = help * __shfl(val5, 1, 4);
if (cond) val5 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
spc = help * __shfl(val5, 3, 8);
if (cond) val5 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
spc = help * __shfl(val5, 7, 16);
if (cond) val5 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
spc = help * __shfl(val5, 15, 32);
if (cond) val5 += spc;
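  // Combine the per-warp partials through shared memory: in each round the lower warps of a group expose their
  // boundary lane(s) in spartc, and the upper warps add that boundary value scaled by sfacA[tid % group_width].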
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
spartc[clane + (15 * order + 5 * delta)] = val5;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
spartc[clane + (31 * order + 5 * delta)] = val5;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
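  // Cross-chunk carry propagation: the last `order` threads publish this chunk's final prefix value(s) to
  // fullcarry and set status to 2 (done). Chunks other than the first then spin in warp 0 on the status flags
  // of up to warp_size preceding chunks, pick the nearest completed one via __ballot/__ffs, broadcast its carry
  // through shared memory, and add it scaled by sfacA[tid]. Only val0 needs this fix-up: elements further than
  // one block into the chunk would be scaled by coefficient powers that underflow in float (sfacA is zero there).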
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val5;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
output[offs + (5 * block_size)] = val5;
}
}
static __global__ __launch_bounds__(block_size, 2)
void Recurrence7(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 7;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4, val5, val6;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
val5 = 0;
if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)];
val6 = 0;
if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
val5 = input[offs + (5 * block_size)];
val6 = input[offs + (6 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
val5 *= 2.809900e-01f;
val6 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
spc = help * __shfl(val5, 0, 2);
if (cond) val5 += spc;
spc = help * __shfl(val6, 0, 2);
if (cond) val6 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
spc = help * __shfl(val5, 1, 4);
if (cond) val5 += spc;
spc = help * __shfl(val6, 1, 4);
if (cond) val6 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
spc = help * __shfl(val5, 3, 8);
if (cond) val5 += spc;
spc = help * __shfl(val6, 3, 8);
if (cond) val6 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
spc = help * __shfl(val5, 7, 16);
if (cond) val5 += spc;
spc = help * __shfl(val6, 7, 16);
if (cond) val6 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
spc = help * __shfl(val5, 15, 32);
if (cond) val5 += spc;
spc = help * __shfl(val6, 15, 32);
if (cond) val6 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
spartc[clane + (15 * order + 5 * delta)] = val5;
spartc[clane + (15 * order + 6 * delta)] = val6;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
spartc[clane + (31 * order + 6 * delta)] = val6;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
spartc[clane + (31 * order + 5 * delta)] = val5;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val6;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5;
if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
output[offs + (5 * block_size)] = val5;
output[offs + (6 * block_size)] = val6;
}
}
static __global__ __launch_bounds__(block_size, 2)
void Recurrence8(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 8;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4, val5, val6, val7;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
val5 = 0;
if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)];
val6 = 0;
if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)];
val7 = 0;
if (offs + (7 * block_size) < items) val7 = input[offs + (7 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
val5 = input[offs + (5 * block_size)];
val6 = input[offs + (6 * block_size)];
val7 = input[offs + (7 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
val5 *= 2.809900e-01f;
val6 *= 2.809900e-01f;
val7 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
spc = help * __shfl(val5, 0, 2);
if (cond) val5 += spc;
spc = help * __shfl(val6, 0, 2);
if (cond) val6 += spc;
spc = help * __shfl(val7, 0, 2);
if (cond) val7 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
spc = help * __shfl(val5, 1, 4);
if (cond) val5 += spc;
spc = help * __shfl(val6, 1, 4);
if (cond) val6 += spc;
spc = help * __shfl(val7, 1, 4);
if (cond) val7 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
spc = help * __shfl(val5, 3, 8);
if (cond) val5 += spc;
spc = help * __shfl(val6, 3, 8);
if (cond) val6 += spc;
spc = help * __shfl(val7, 3, 8);
if (cond) val7 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
spc = help * __shfl(val5, 7, 16);
if (cond) val5 += spc;
spc = help * __shfl(val6, 7, 16);
if (cond) val6 += spc;
spc = help * __shfl(val7, 7, 16);
if (cond) val7 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
spc = help * __shfl(val5, 15, 32);
if (cond) val5 += spc;
spc = help * __shfl(val6, 15, 32);
if (cond) val6 += spc;
spc = help * __shfl(val7, 15, 32);
if (cond) val7 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
spartc[clane + (15 * order + 5 * delta)] = val5;
spartc[clane + (15 * order + 6 * delta)] = val6;
spartc[clane + (15 * order + 7 * delta)] = val7;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
spartc[clane + (31 * order + 6 * delta)] = val6;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)];
val7 += sfacA[tid] * spartc[31 * order + (6 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
spartc[clane + (31 * order + 5 * delta)] = val5;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val7;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5;
if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6;
if (offs + (7 * block_size) < items) output[offs + (7 * block_size)] = val7;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
output[offs + (5 * block_size)] = val5;
output[offs + (6 * block_size)] = val6;
output[offs + (7 * block_size)] = val7;
}
}
static __global__ __launch_bounds__(block_size, 2)
void Recurrence9(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 9;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4, val5, val6, val7, val8;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
val5 = 0;
if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)];
val6 = 0;
if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)];
val7 = 0;
if (offs + (7 * block_size) < items) val7 = input[offs + (7 * block_size)];
val8 = 0;
if (offs + (8 * block_size) < items) val8 = input[offs + (8 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
val5 = input[offs + (5 * block_size)];
val6 = input[offs + (6 * block_size)];
val7 = input[offs + (7 * block_size)];
val8 = input[offs + (8 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
val5 *= 2.809900e-01f;
val6 *= 2.809900e-01f;
val7 *= 2.809900e-01f;
val8 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
spc = help * __shfl(val5, 0, 2);
if (cond) val5 += spc;
spc = help * __shfl(val6, 0, 2);
if (cond) val6 += spc;
spc = help * __shfl(val7, 0, 2);
if (cond) val7 += spc;
spc = help * __shfl(val8, 0, 2);
if (cond) val8 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
spc = help * __shfl(val5, 1, 4);
if (cond) val5 += spc;
spc = help * __shfl(val6, 1, 4);
if (cond) val6 += spc;
spc = help * __shfl(val7, 1, 4);
if (cond) val7 += spc;
spc = help * __shfl(val8, 1, 4);
if (cond) val8 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
spc = help * __shfl(val5, 3, 8);
if (cond) val5 += spc;
spc = help * __shfl(val6, 3, 8);
if (cond) val6 += spc;
spc = help * __shfl(val7, 3, 8);
if (cond) val7 += spc;
spc = help * __shfl(val8, 3, 8);
if (cond) val8 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
spc = help * __shfl(val5, 7, 16);
if (cond) val5 += spc;
spc = help * __shfl(val6, 7, 16);
if (cond) val6 += spc;
spc = help * __shfl(val7, 7, 16);
if (cond) val7 += spc;
spc = help * __shfl(val8, 7, 16);
if (cond) val8 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
spc = help * __shfl(val5, 15, 32);
if (cond) val5 += spc;
spc = help * __shfl(val6, 15, 32);
if (cond) val6 += spc;
spc = help * __shfl(val7, 15, 32);
if (cond) val7 += spc;
spc = help * __shfl(val8, 15, 32);
if (cond) val8 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
spartc[clwo + 8 * delta] = val8;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
spartc[clwo + 8 * delta] = val8;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
spartc[clwo + 8 * delta] = val8;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
spartc[clwo + 8 * delta] = val8;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
spartc[clane + (15 * order + 5 * delta)] = val5;
spartc[clane + (15 * order + 6 * delta)] = val6;
spartc[clane + (15 * order + 7 * delta)] = val7;
spartc[clane + (15 * order + 8 * delta)] = val8;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
spartc[clane + (31 * order + 6 * delta)] = val6;
spartc[clane + (31 * order + 8 * delta)] = val8;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)];
val7 += sfacA[tid] * spartc[31 * order + (6 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
spartc[clane + (31 * order + 5 * delta)] = val5;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 7 * delta)] = val7;
}
__syncthreads();
if (warp < 9) {
val8 += sfacA[tid] * spartc[31 * order + (7 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val8;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5;
if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6;
if (offs + (7 * block_size) < items) output[offs + (7 * block_size)] = val7;
if (offs + (8 * block_size) < items) output[offs + (8 * block_size)] = val8;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
output[offs + (5 * block_size)] = val5;
output[offs + (6 * block_size)] = val6;
output[offs + (7 * block_size)] = val7;
output[offs + (8 * block_size)] = val8;
}
}
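// Event-based GPU timer: start() records the begin event; stop() records and synchronizes the end event and
// returns the elapsed time in seconds.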
struct GPUTimer
{
hipEvent_t beg, end;
GPUTimer() {hipEventCreate(&beg); hipEventCreate(&end);}
~GPUTimer() {hipEventDestroy(beg); hipEventDestroy(end);}
void start() {hipEventRecord(beg, 0);}
double stop() {hipEventRecord(end, 0); hipEventSynchronize(end); float ms; hipEventElapsedTime(&ms, beg, end); return 0.001 * ms;}
};
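// Compares the computed result res against the reference ref over ne elements and returns the maximum
// absolute error in me and the maximum relative error in mre.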
template< class T1, class T2 >
void check_cpu_reference(const T1 *ref,
const T2 *res,
const long int& ne,
T1& me, T1& mre) {
mre = me = (T1)0;
for (long int i = 0; i < ne; i++)
{
T1 a = (T1)(res[i]) - ref[i];
if( a < (T1)0 ) a = -a;
if( ref[i] != (T1)0 )
{
T1 r = (ref[i] < (T1)0) ? -ref[i] : ref[i];
T1 b = a / r;
mre = b > mre ? b : mre;
}
me = a > me ? a : me;
}
}
int main(int argc, char *argv[])
{/*
printf("Parallel Linear Recurrence Computation\n");
printf("Copyright (c) 2018 Texas State University\n");
*/
if (argc != 3) {
fprintf(stderr, "USAGE: %s problem_size repeats\n", argv[0]);
return -1;
}
const long int n = atol(argv[1]);
const long int iterations = atol(argv[2]);
if (n < 1) {fprintf(stderr, "ERROR: problem_size must be at least 1\n"); return -1;};
int *d_status;
T *h_in, *h_out, *h_sol, *d_in, *d_out, *d_partcarry, *d_fullcarry;
const size_t size = n * sizeof(T);
h_in = (T *)malloc(size); assert(h_in != NULL);
h_out = (T *)malloc(size); assert(h_out != NULL);
h_sol = (T *)malloc(size); assert(h_sol != NULL);
  // initialize the input with a simple +1/-1 pattern and build the CPU reference solution
  // (long int indices so problem sizes beyond INT_MAX remain addressable)
  for (long int i = 0; i < n; i++) {
    h_in[i] = (i & 32) / 16 - 1;
    h_sol[i] = 0;
  }
  // non-recursive term: h_sol[i] = 0.280990 * h_in[i]
  for (long int i = 0; i < n; i++) {
    if ((i - 0) >= 0) {
      h_sol[i] += 2.809900e-01f * h_in[i - 0];
    }
  }
  // recursive term: h_sol[i] += 0.719010 * h_sol[i - 1]
  for (long int i = 1; i < n; i++) {
    if ((i - 1) >= 0) {
      h_sol[i] += 7.190100e-01f * h_sol[i - 1];
    }
  }
hipSetDevice(device);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
const int SMs = deviceProp.multiProcessorCount;
int valsperthread = 1;
while ((valsperthread < 9) && (block_size * 2 * SMs * valsperthread < n)) {
valsperthread++;
}
const int chunk_size = valsperthread * block_size;
// const int iterations = 5;
assert(hipSuccess == hipMalloc(&d_in, size));
assert(hipSuccess == hipMalloc(&d_out, size));
assert(hipSuccess == hipMalloc(&d_status, (n + chunk_size - 1) / chunk_size * sizeof(int)));
assert(hipSuccess == hipMalloc(&d_partcarry, (n + chunk_size - 1) / chunk_size * order * sizeof(T)));
assert(hipSuccess == hipMalloc(&d_fullcarry, (n + chunk_size - 1) / chunk_size * order * sizeof(T)));
assert(hipSuccess == hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice));
assert(hipSuccess == hipMemcpy(d_out, d_in, size, hipMemcpyDeviceToDevice));
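  // clear the per-chunk status flags and issue one untimed warm-up launch before the timed loop below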
hipMemset(d_status, 0, (n + chunk_size - 1) / chunk_size * sizeof(int));
switch (valsperthread) {
case 1: hipLaunchKernelGGL(( Recurrence1), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 2: hipLaunchKernelGGL(( Recurrence2), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 3: hipLaunchKernelGGL(( Recurrence3), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 4: hipLaunchKernelGGL(( Recurrence4), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 5: hipLaunchKernelGGL(( Recurrence5), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 6: hipLaunchKernelGGL(( Recurrence6), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 7: hipLaunchKernelGGL(( Recurrence7), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 8: hipLaunchKernelGGL(( Recurrence8), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 9: hipLaunchKernelGGL(( Recurrence9), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
}
GPUTimer timer;
timer.start();
// simulating fwd+rev by doubling iterations
for (long i = 0; i < 2*iterations; i++) {
hipMemset(d_status, 0, (n + chunk_size - 1) / chunk_size * sizeof(int));
switch (valsperthread) {
case 1: hipLaunchKernelGGL(( Recurrence1), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 2: hipLaunchKernelGGL(( Recurrence2), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 3: hipLaunchKernelGGL(( Recurrence3), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 4: hipLaunchKernelGGL(( Recurrence4), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 5: hipLaunchKernelGGL(( Recurrence5), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 6: hipLaunchKernelGGL(( Recurrence6), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 7: hipLaunchKernelGGL(( Recurrence7), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 8: hipLaunchKernelGGL(( Recurrence8), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 9: hipLaunchKernelGGL(( Recurrence9), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
}
}
double runtime = timer.stop() / iterations;
double throughput = 0.000000001 * n / runtime;
assert(hipSuccess == hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost));
/*
for (long int i = 0; i < n; i++) {
T s = h_sol[i];
T o = h_out[i];
if (fabsf(o - s) > 0.001) {
printf("result not correct at index %d: %e != %e\n", i, h_sol[i], h_out[i]);
return -1;
}
}
printf("size = %d\tthroughput = %7.4f gigaitems/s\truntime = %7.4f s\tPassed!\n", n, throughput, runtime);
printf("first elements of result are:\n");
for (int i = 0; (i < 8) && (i < n); i++) {
printf(" %f", h_out[i]);
}
printf("\n");
*/
double mebissec = n / (runtime*1024*1024); // Mis/s
float max_abs_err, max_rel_err;
check_cpu_reference(h_sol, h_out, n, max_abs_err, max_rel_err);
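  // printed columns: throughput in mebi-items per second, maximum absolute error, maximum relative error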
printf("%7.7f %e %e\n", mebissec, max_abs_err, max_rel_err);
free(h_in); free(h_out); free(h_sol);
hipFree(d_in); hipFree(d_out); hipFree(d_status); hipFree(d_partcarry); hipFree(d_fullcarry);
return 0;
}
|
35e038a8b63ae52cf4efa0825c38bf2ec35506eb.cu
|
/*
PLR - Parallelized Linear Recurrences [float]
Copyright (c) 2018 Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted for academic, research, experimental, or personal use provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry Relations at Texas State University http://www.txstate.edu/ocir/.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Sepideh Maleki and Martin Burtscher
non-recursive coefficients: (0.280990)
recursive coefficients: (0.719010)
*** Simulating fwd + rev recursive filter ***
*/
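// With these coefficients the kernels evaluate the first-order linear recurrence
// y[0] = 0.280990 * x[0] and y[i] = 0.280990 * x[i] + 0.719010 * y[i-1] for i > 0.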
#include <cstdio>
#include <cassert>
#include <cuda.h>
typedef float T;
static const int device = 0;
static const int order = 1;
static const int warp_size = 32;
static const int block_size = 1024;
static __device__ const T facA[264] = {7.190100e-01f, 5.169754e-01f, 3.717105e-01f, 2.672636e-01f, 1.921652e-01f, 1.381687e-01f, 9.934466e-02f, 7.142980e-02f, 5.135874e-02f, 3.692745e-02f, 2.655121e-02f, 1.909058e-02f, 1.372632e-02f, 9.869360e-03f, 7.096169e-03f, 5.102216e-03f, 3.668545e-03f, 2.637720e-03f, 1.896547e-03f, 1.363636e-03f, 9.804681e-04f, 7.049664e-04f, 5.068778e-04f, 3.644502e-04f, 2.620434e-04f, 1.884118e-04f, 1.354700e-04f, 9.740427e-05f, 7.003464e-05f, 5.035561e-05f, 3.620618e-05f, 2.603261e-05f, 1.871771e-05f, 1.345822e-05f, 9.676593e-06f, 6.957568e-06f, 5.002561e-06f, 3.596891e-06f, 2.586201e-06f, 1.859504e-06f, 1.337002e-06f, 9.613179e-07f, 6.911972e-07f, 4.969777e-07f, 3.573319e-07f, 2.569252e-07f, 1.847318e-07f, 1.328240e-07f, 9.550178e-08f, 6.866674e-08f, 4.937207e-08f, 3.549901e-08f, 2.552414e-08f, 1.835211e-08f, 1.319535e-08f, 9.487591e-09f, 6.821673e-09f, 4.904851e-09f, 3.526637e-09f, 2.535687e-09f, 1.823184e-09f, 1.310888e-09f, 9.425415e-10f, 6.776967e-10f, 4.872707e-10f, 3.503525e-10f, 2.519070e-10f, 1.811236e-10f, 1.302297e-10f, 9.363646e-11f, 6.732555e-11f, 4.840774e-11f, 3.480565e-11f, 2.502561e-11f, 1.799367e-11f, 1.293763e-11f, 9.302282e-12f, 6.688434e-12f, 4.809051e-12f, 3.457756e-12f, 2.486161e-12f, 1.787574e-12f, 1.285284e-12f, 9.241320e-13f, 6.644601e-13f, 4.777535e-13f, 3.435095e-13f, 2.469868e-13f, 1.775859e-13f, 1.276861e-13f, 9.180756e-14f, 6.601056e-14f, 4.746225e-14f, 3.412583e-14f, 2.453681e-14f, 1.764222e-14f, 1.268493e-14f, 9.120591e-15f, 6.557796e-15f, 4.715121e-15f, 3.390219e-15f, 2.437601e-15f, 1.752660e-15f, 1.260180e-15f, 9.060818e-16f, 6.514819e-16f, 4.684220e-16f, 3.368001e-16f, 2.421626e-16f, 1.741173e-16f, 1.251921e-16f, 9.001438e-17f, 6.472124e-17f, 4.653522e-17f, 3.345928e-17f, 2.405756e-17f, 1.729763e-17f, 1.243717e-17f, 8.942447e-18f, 6.429709e-18f, 4.623025e-18f, 3.324001e-18f, 2.389990e-18f, 1.718427e-18f, 1.235566e-18f, 8.883843e-19f, 6.387572e-19f, 4.592728e-19f, 3.302217e-19f, 2.374327e-19f, 1.707165e-19f, 1.227469e-19f, 8.825622e-20f, 6.345711e-20f, 4.562630e-20f, 3.280576e-20f, 2.358767e-20f, 1.695977e-20f, 1.219425e-20f, 8.767785e-21f, 6.304125e-21f, 4.532729e-21f, 3.259077e-21f, 2.343309e-21f, 1.684863e-21f, 1.211433e-21f, 8.710326e-22f, 6.262811e-22f, 4.503024e-22f, 3.237719e-22f, 2.327953e-22f, 1.673821e-22f, 1.203494e-22f, 8.653243e-23f, 6.221768e-23f, 4.473513e-23f, 3.216501e-23f, 2.312696e-23f, 1.662852e-23f, 1.195607e-23f, 8.596534e-24f, 6.180994e-24f, 4.444197e-24f, 3.195422e-24f, 2.297540e-24f, 1.651954e-24f, 1.187772e-24f, 8.540197e-25f, 6.140487e-25f, 4.415072e-25f, 3.174481e-25f, 2.282483e-25f, 1.641128e-25f, 1.179988e-25f, 8.484229e-26f, 6.100245e-26f, 4.386138e-26f, 3.153677e-26f, 2.267525e-26f, 1.630373e-26f, 1.172255e-26f, 8.428628e-27f, 6.060268e-27f, 4.357393e-27f, 3.133009e-27f, 2.252665e-27f, 1.619689e-27f, 1.164572e-27f, 8.373393e-28f, 6.020553e-28f, 4.328838e-28f, 3.112478e-28f, 2.237902e-28f, 1.609074e-28f, 1.156940e-28f, 8.318518e-29f, 5.981097e-29f, 4.300469e-29f, 3.092080e-29f, 2.223236e-29f, 1.598529e-29f, 1.149359e-29f, 8.264003e-30f, 5.941901e-30f, 4.272286e-30f, 3.071816e-30f, 2.208667e-30f, 1.588053e-30f, 1.141826e-30f, 8.209844e-31f, 5.902960e-31f, 4.244287e-31f, 3.051685e-31f, 2.194192e-31f, 1.577646e-31f, 1.134343e-31f, 8.156041e-32f, 5.864275e-32f, 4.216472e-32f, 3.031686e-32f, 2.179812e-32f, 1.567307e-32f, 1.126909e-32f, 8.102591e-33f, 5.825844e-33f, 4.188840e-33f, 3.011818e-33f, 2.165527e-33f, 1.557036e-33f, 1.119524e-33f, 8.049491e-34f, 5.787664e-34f, 4.161388e-34f, 2.992080e-34f, 
2.151335e-34f, 1.546831e-34f, 1.112187e-34f, 7.996737e-35f, 5.749734e-35f, 4.134116e-35f, 2.972471e-35f, 2.137236e-35f, 1.536694e-35f, 1.104899e-35f, 7.944331e-36f, 5.712053e-36f, 4.107023e-36f, 2.952991e-36f, 2.123230e-36f, 1.526623e-36f, 1.097657e-36f, 7.892267e-37f, 5.674619e-37f, 4.080108e-37f, 2.933638e-37f, 2.109315e-37f, 1.516619e-37f, 1.090464e-37f, 7.840545e-38f, 5.637430e-38f, 4.053369e-38f, 2.914413e-38f, 2.095492e-38f, 1.506680e-38f};
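// facA[k] holds 0.719010^(k+1), the weight applied to a value k+1 positions upstream; the table stops after
// 264 entries, presumably because further powers fall below the single-precision normal range (the last entry
// is about 1.5e-38), and sfacA is zero-padded beyond that point.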
// shared memory size is 5256 bytes
static __device__ unsigned int counter = 0;
static __global__ __launch_bounds__(block_size, 2)
void Recurrence1(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 1;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
}
val0 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
}
}
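  // the last `order` threads publish this chunk's final value as the carry for the next chunk
  // and mark the chunk as done (status = 2)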
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val0;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
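  // chained carry propagation: warp 0 spins until the preceding chunk has published its carry,
  // then the carry is folded in. Only the first 264 outputs of the chunk receive a non-zero
  // correction, since sfacA[tid] is zero for tid >= 264 (a^(i+1) underflows in single precision).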
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
} else {
output[offs + (0 * block_size)] = val0;
}
}
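// Recurrence2: same scheme as Recurrence1 with valsperthread = 2. Inside the chunk, val1 picks up
// the carry from the end of val0's block via spartc before the chunk carry (taken from val1) is published.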
static __global__ __launch_bounds__(block_size, 2)
void Recurrence2(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 2;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val1;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
}
}
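// Recurrence3: valsperthread = 3; block carries are chained val0 -> val1 -> val2 through spartc,
// and val2 supplies the chunk carry.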
static __global__ __launch_bounds__(block_size, 2)
void Recurrence3(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 3;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val2;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
}
}
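// Recurrence4: valsperthread = 4.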
static __global__ __launch_bounds__(block_size, 2)
void Recurrence4(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 4;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val3;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
}
}
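// Recurrence5: valsperthread = 5.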
static __global__ __launch_bounds__(block_size, 2)
void Recurrence5(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 5;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val4;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
}
}
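// Recurrence6: valsperthread = 6.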
static __global__ __launch_bounds__(block_size, 2)
void Recurrence6(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 6;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4, val5;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
val5 = 0;
if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
val5 = input[offs + (5 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
val5 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
spc = help * __shfl(val5, 0, 2);
if (cond) val5 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
spc = help * __shfl(val5, 1, 4);
if (cond) val5 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
spc = help * __shfl(val5, 3, 8);
if (cond) val5 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
spc = help * __shfl(val5, 7, 16);
if (cond) val5 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
spc = help * __shfl(val5, 15, 32);
if (cond) val5 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
spartc[clane + (15 * order + 5 * delta)] = val5;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
spartc[clane + (31 * order + 5 * delta)] = val5;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val5;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
output[offs + (5 * block_size)] = val5;
}
}
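// Recurrence7: valsperthread = 7.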
static __global__ __launch_bounds__(block_size, 2)
void Recurrence7(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 7;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4, val5, val6;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
val5 = 0;
if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)];
val6 = 0;
if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
val5 = input[offs + (5 * block_size)];
val6 = input[offs + (6 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
val5 *= 2.809900e-01f;
val6 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
spc = help * __shfl(val5, 0, 2);
if (cond) val5 += spc;
spc = help * __shfl(val6, 0, 2);
if (cond) val6 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
spc = help * __shfl(val5, 1, 4);
if (cond) val5 += spc;
spc = help * __shfl(val6, 1, 4);
if (cond) val6 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
spc = help * __shfl(val5, 3, 8);
if (cond) val5 += spc;
spc = help * __shfl(val6, 3, 8);
if (cond) val6 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
spc = help * __shfl(val5, 7, 16);
if (cond) val5 += spc;
spc = help * __shfl(val6, 7, 16);
if (cond) val6 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
spc = help * __shfl(val5, 15, 32);
if (cond) val5 += spc;
spc = help * __shfl(val6, 15, 32);
if (cond) val6 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
spartc[clane + (15 * order + 5 * delta)] = val5;
spartc[clane + (15 * order + 6 * delta)] = val6;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
spartc[clane + (31 * order + 6 * delta)] = val6;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
spartc[clane + (31 * order + 5 * delta)] = val5;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val6;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5;
if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
output[offs + (5 * block_size)] = val5;
output[offs + (6 * block_size)] = val6;
}
}
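// Recurrence8: valsperthread = 8.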
static __global__ __launch_bounds__(block_size, 2)
void Recurrence8(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 8;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4, val5, val6, val7;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
val5 = 0;
if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)];
val6 = 0;
if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)];
val7 = 0;
if (offs + (7 * block_size) < items) val7 = input[offs + (7 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
val5 = input[offs + (5 * block_size)];
val6 = input[offs + (6 * block_size)];
val7 = input[offs + (7 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
val5 *= 2.809900e-01f;
val6 *= 2.809900e-01f;
val7 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
spc = help * __shfl(val5, 0, 2);
if (cond) val5 += spc;
spc = help * __shfl(val6, 0, 2);
if (cond) val6 += spc;
spc = help * __shfl(val7, 0, 2);
if (cond) val7 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
spc = help * __shfl(val5, 1, 4);
if (cond) val5 += spc;
spc = help * __shfl(val6, 1, 4);
if (cond) val6 += spc;
spc = help * __shfl(val7, 1, 4);
if (cond) val7 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
spc = help * __shfl(val5, 3, 8);
if (cond) val5 += spc;
spc = help * __shfl(val6, 3, 8);
if (cond) val6 += spc;
spc = help * __shfl(val7, 3, 8);
if (cond) val7 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
spc = help * __shfl(val5, 7, 16);
if (cond) val5 += spc;
spc = help * __shfl(val6, 7, 16);
if (cond) val6 += spc;
spc = help * __shfl(val7, 7, 16);
if (cond) val7 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
spc = help * __shfl(val5, 15, 32);
if (cond) val5 += spc;
spc = help * __shfl(val6, 15, 32);
if (cond) val6 += spc;
spc = help * __shfl(val7, 15, 32);
if (cond) val7 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
spartc[clane + (15 * order + 5 * delta)] = val5;
spartc[clane + (15 * order + 6 * delta)] = val6;
spartc[clane + (15 * order + 7 * delta)] = val7;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
spartc[clane + (31 * order + 6 * delta)] = val6;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)];
val7 += sfacA[tid] * spartc[31 * order + (6 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
spartc[clane + (31 * order + 5 * delta)] = val5;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val7;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5;
if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6;
if (offs + (7 * block_size) < items) output[offs + (7 * block_size)] = val7;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
output[offs + (5 * block_size)] = val5;
output[offs + (6 * block_size)] = val6;
output[offs + (7 * block_size)] = val7;
}
}
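// Recurrence9: same chunked linear-recurrence scan as the kernels above, but with nine values per thread.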
static __global__ __launch_bounds__(block_size, 2)
void Recurrence9(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry)
{
const int valsperthread = 9;
const int chunk_size = valsperthread * block_size;
__shared__ T spartc[chunk_size / warp_size * order];
__shared__ T sfullc[order];
__shared__ int cid;
const int tid = threadIdx.x;
const int warp = tid / warp_size;
const int lane = tid % warp_size;
__shared__ T sfacA[block_size];
if (tid < 264) sfacA[tid] = facA[tid];
else sfacA[tid] = 0;
if (tid == 0) {
cid = atomicInc(&counter, gridDim.x - 1);
}
__syncthreads();
const int chunk_id = cid;
const int offs = tid + chunk_id * chunk_size;
T val0, val1, val2, val3, val4, val5, val6, val7, val8;
if (chunk_id == gridDim.x - 1) {
val0 = 0;
if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)];
val1 = 0;
if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)];
val2 = 0;
if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)];
val3 = 0;
if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)];
val4 = 0;
if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)];
val5 = 0;
if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)];
val6 = 0;
if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)];
val7 = 0;
if (offs + (7 * block_size) < items) val7 = input[offs + (7 * block_size)];
val8 = 0;
if (offs + (8 * block_size) < items) val8 = input[offs + (8 * block_size)];
} else {
val0 = input[offs + (0 * block_size)];
val1 = input[offs + (1 * block_size)];
val2 = input[offs + (2 * block_size)];
val3 = input[offs + (3 * block_size)];
val4 = input[offs + (4 * block_size)];
val5 = input[offs + (5 * block_size)];
val6 = input[offs + (6 * block_size)];
val7 = input[offs + (7 * block_size)];
val8 = input[offs + (8 * block_size)];
}
val0 *= 2.809900e-01f;
val1 *= 2.809900e-01f;
val2 *= 2.809900e-01f;
val3 *= 2.809900e-01f;
val4 *= 2.809900e-01f;
val5 *= 2.809900e-01f;
val6 *= 2.809900e-01f;
val7 *= 2.809900e-01f;
val8 *= 2.809900e-01f;
const T sfA = sfacA[lane];
int cond;
T help, spc;
help = 7.190100e-01f;
cond = ((lane & 1) != 0);
spc = help * __shfl(val0, 0, 2);
if (cond) val0 += spc;
spc = help * __shfl(val1, 0, 2);
if (cond) val1 += spc;
spc = help * __shfl(val2, 0, 2);
if (cond) val2 += spc;
spc = help * __shfl(val3, 0, 2);
if (cond) val3 += spc;
spc = help * __shfl(val4, 0, 2);
if (cond) val4 += spc;
spc = help * __shfl(val5, 0, 2);
if (cond) val5 += spc;
spc = help * __shfl(val6, 0, 2);
if (cond) val6 += spc;
spc = help * __shfl(val7, 0, 2);
if (cond) val7 += spc;
spc = help * __shfl(val8, 0, 2);
if (cond) val8 += spc;
help = __shfl(sfA, lane % 2);
cond = ((lane & 2) != 0);
spc = help * __shfl(val0, 1, 4);
if (cond) val0 += spc;
spc = help * __shfl(val1, 1, 4);
if (cond) val1 += spc;
spc = help * __shfl(val2, 1, 4);
if (cond) val2 += spc;
spc = help * __shfl(val3, 1, 4);
if (cond) val3 += spc;
spc = help * __shfl(val4, 1, 4);
if (cond) val4 += spc;
spc = help * __shfl(val5, 1, 4);
if (cond) val5 += spc;
spc = help * __shfl(val6, 1, 4);
if (cond) val6 += spc;
spc = help * __shfl(val7, 1, 4);
if (cond) val7 += spc;
spc = help * __shfl(val8, 1, 4);
if (cond) val8 += spc;
help = __shfl(sfA, lane % 4);
cond = ((lane & 4) != 0);
spc = help * __shfl(val0, 3, 8);
if (cond) val0 += spc;
spc = help * __shfl(val1, 3, 8);
if (cond) val1 += spc;
spc = help * __shfl(val2, 3, 8);
if (cond) val2 += spc;
spc = help * __shfl(val3, 3, 8);
if (cond) val3 += spc;
spc = help * __shfl(val4, 3, 8);
if (cond) val4 += spc;
spc = help * __shfl(val5, 3, 8);
if (cond) val5 += spc;
spc = help * __shfl(val6, 3, 8);
if (cond) val6 += spc;
spc = help * __shfl(val7, 3, 8);
if (cond) val7 += spc;
spc = help * __shfl(val8, 3, 8);
if (cond) val8 += spc;
help = __shfl(sfA, lane % 8);
cond = ((lane & 8) != 0);
spc = help * __shfl(val0, 7, 16);
if (cond) val0 += spc;
spc = help * __shfl(val1, 7, 16);
if (cond) val1 += spc;
spc = help * __shfl(val2, 7, 16);
if (cond) val2 += spc;
spc = help * __shfl(val3, 7, 16);
if (cond) val3 += spc;
spc = help * __shfl(val4, 7, 16);
if (cond) val4 += spc;
spc = help * __shfl(val5, 7, 16);
if (cond) val5 += spc;
spc = help * __shfl(val6, 7, 16);
if (cond) val6 += spc;
spc = help * __shfl(val7, 7, 16);
if (cond) val7 += spc;
spc = help * __shfl(val8, 7, 16);
if (cond) val8 += spc;
help = __shfl(sfA, lane % 16);
cond = ((lane & 16) != 0);
spc = help * __shfl(val0, 15, 32);
if (cond) val0 += spc;
spc = help * __shfl(val1, 15, 32);
if (cond) val1 += spc;
spc = help * __shfl(val2, 15, 32);
if (cond) val2 += spc;
spc = help * __shfl(val3, 15, 32);
if (cond) val3 += spc;
spc = help * __shfl(val4, 15, 32);
if (cond) val4 += spc;
spc = help * __shfl(val5, 15, 32);
if (cond) val5 += spc;
spc = help * __shfl(val6, 15, 32);
if (cond) val6 += spc;
spc = help * __shfl(val7, 15, 32);
if (cond) val7 += spc;
spc = help * __shfl(val8, 15, 32);
if (cond) val8 += spc;
const int delta = block_size / warp_size * order;
const int clane = lane - (warp_size - order);
const int clwo = clane + warp * order;
if (((warp & 1) == 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
spartc[clwo + 8 * delta] = val8;
}
__syncthreads();
if ((warp & 1) != 0) {
const int cwarp = ((warp & ~1) | 0) * order;
const T helpA = sfacA[tid % (warp_size * 1)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
if (((warp & 3) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
spartc[clwo + 8 * delta] = val8;
}
}
__syncthreads();
if ((warp & 2) != 0) {
const int cwarp = ((warp & ~3) | 1) * order;
const T helpA = sfacA[tid % (warp_size * 2)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
if (((warp & 7) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
spartc[clwo + 8 * delta] = val8;
}
}
__syncthreads();
if ((warp & 4) != 0) {
const int cwarp = ((warp & ~7) | 3) * order;
const T helpA = sfacA[tid % (warp_size * 4)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
if (((warp & 15) != 0) && (clane >= 0)) {
spartc[clwo + 0 * delta] = val0;
spartc[clwo + 1 * delta] = val1;
spartc[clwo + 2 * delta] = val2;
spartc[clwo + 3 * delta] = val3;
spartc[clwo + 4 * delta] = val4;
spartc[clwo + 5 * delta] = val5;
spartc[clwo + 6 * delta] = val6;
spartc[clwo + 7 * delta] = val7;
spartc[clwo + 8 * delta] = val8;
}
}
__syncthreads();
if ((warp & 8) != 0) {
const int cwarp = ((warp & ~15) | 7) * order;
const T helpA = sfacA[tid % (warp_size * 8)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
if ((warp == 15) && (clane >= 0)) {
spartc[clane + (15 * order + 0 * delta)] = val0;
spartc[clane + (15 * order + 1 * delta)] = val1;
spartc[clane + (15 * order + 2 * delta)] = val2;
spartc[clane + (15 * order + 3 * delta)] = val3;
spartc[clane + (15 * order + 4 * delta)] = val4;
spartc[clane + (15 * order + 5 * delta)] = val5;
spartc[clane + (15 * order + 6 * delta)] = val6;
spartc[clane + (15 * order + 7 * delta)] = val7;
spartc[clane + (15 * order + 8 * delta)] = val8;
}
}
__syncthreads();
if ((warp & 16) != 0) {
if ((warp & 15) < 9) {
const int cwarp = 15 * order;
const T helpA = sfacA[tid % (warp_size * 16)];
val0 += helpA * spartc[cwarp + (0 * delta + 0)];
val1 += helpA * spartc[cwarp + (1 * delta + 0)];
val2 += helpA * spartc[cwarp + (2 * delta + 0)];
val3 += helpA * spartc[cwarp + (3 * delta + 0)];
val4 += helpA * spartc[cwarp + (4 * delta + 0)];
val5 += helpA * spartc[cwarp + (5 * delta + 0)];
val6 += helpA * spartc[cwarp + (6 * delta + 0)];
val7 += helpA * spartc[cwarp + (7 * delta + 0)];
val8 += helpA * spartc[cwarp + (8 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 0 * delta)] = val0;
spartc[clane + (31 * order + 2 * delta)] = val2;
spartc[clane + (31 * order + 4 * delta)] = val4;
spartc[clane + (31 * order + 6 * delta)] = val6;
spartc[clane + (31 * order + 8 * delta)] = val8;
}
}
__syncthreads();
if (warp < 9) {
val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)];
val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)];
val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)];
val7 += sfacA[tid] * spartc[31 * order + (6 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 1 * delta)] = val1;
spartc[clane + (31 * order + 5 * delta)] = val5;
}
__syncthreads();
if (warp < 9) {
val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)];
val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 3 * delta)] = val3;
}
__syncthreads();
if (warp < 9) {
val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)];
}
if ((warp == 31) && (clane >= 0)) {
spartc[clane + (31 * order + 7 * delta)] = val7;
}
__syncthreads();
if (warp < 9) {
val8 += sfacA[tid] * spartc[31 * order + (7 * delta + 0)];
}
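// The last 'order' threads of the block write this chunk's full carry and mark the chunk complete (status 2).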
const int idx = tid - (block_size - order);
if (idx >= 0) {
fullcarry[chunk_id * order + idx] = val8;
__threadfence();
if (idx == 0) {
status[chunk_id] = 2;
}
}
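// Chunks other than the first spin until a preceding chunk has published its full carry, then fold that carry into val0.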
if (chunk_id > 0) {
__syncthreads();
if (warp == 0) {
const int cidm1 = chunk_id - 1;
int flag = 1;
do {
if ((cidm1 - lane) >= 0) {
flag = status[cidm1 - lane];
}
} while ((__any(flag == 0)) || (__all(flag != 2)));
int mask = __ballot(flag == 2);
const int pos = __ffs(mask) - 1;
T X0 = fullcarry[cidm1 - pos];
if (lane == 0) {
sfullc[0] = X0;
}
}
__syncthreads();
T X0 = sfullc[0];
val0 += sfacA[tid] * X0;
}
if (chunk_id == gridDim.x - 1) {
if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0;
if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1;
if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2;
if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3;
if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4;
if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5;
if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6;
if (offs + (7 * block_size) < items) output[offs + (7 * block_size)] = val7;
if (offs + (8 * block_size) < items) output[offs + (8 * block_size)] = val8;
} else {
output[offs + (0 * block_size)] = val0;
output[offs + (1 * block_size)] = val1;
output[offs + (2 * block_size)] = val2;
output[offs + (3 * block_size)] = val3;
output[offs + (4 * block_size)] = val4;
output[offs + (5 * block_size)] = val5;
output[offs + (6 * block_size)] = val6;
output[offs + (7 * block_size)] = val7;
output[offs + (8 * block_size)] = val8;
}
}
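// CUDA-event timer: start()/stop() bracket GPU work; stop() synchronizes and returns the elapsed time in seconds.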
struct GPUTimer
{
cudaEvent_t beg, end;
GPUTimer() {cudaEventCreate(&beg); cudaEventCreate(&end);}
~GPUTimer() {cudaEventDestroy(beg); cudaEventDestroy(end);}
void start() {cudaEventRecord(beg, 0);}
double stop() {cudaEventRecord(end, 0); cudaEventSynchronize(end); float ms; cudaEventElapsedTime(&ms, beg, end); return 0.001 * ms;}
};
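// Computes the maximum absolute error (me) and maximum relative error (mre) of res against ref over ne elements.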
template< class T1, class T2 >
void check_cpu_reference(const T1 *ref,
const T2 *res,
const long int& ne,
T1& me, T1& mre) {
mre = me = (T1)0;
for (long int i = 0; i < ne; i++)
{
T1 a = (T1)(res[i]) - ref[i];
if( a < (T1)0 ) a = -a;
if( ref[i] != (T1)0 )
{
T1 r = (ref[i] < (T1)0) ? -ref[i] : ref[i];
T1 b = a / r;
mre = b > mre ? b : mre;
}
me = a > me ? a : me;
}
}
int main(int argc, char *argv[])
{/*
printf("Parallel Linear Recurrence Computation\n");
printf("Copyright (c) 2018 Texas State University\n");
*/
if (argc != 3) {
fprintf(stderr, "USAGE: %s problem_size repeats\n", argv[0]);
return -1;
}
const long int n = atol(argv[1]);
const long int iterations = atol(argv[2]);
if (n < 1) {fprintf(stderr, "ERROR: problem_size must be at least 1\n"); return -1;};
int *d_status;
T *h_in, *h_out, *h_sol, *d_in, *d_out, *d_partcarry, *d_fullcarry;
const size_t size = n * sizeof(T);
h_in = (T *)malloc(size); assert(h_in != NULL);
h_out = (T *)malloc(size); assert(h_out != NULL);
h_sol = (T *)malloc(size); assert(h_sol != NULL);
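// Generate a deterministic test input and compute the CPU reference solution of the recurrence.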
for (long int i = 0; i < n; i++) {
h_in[i] = (i & 32) / 16 - 1;
h_sol[i] = 0;
}
for (long int i = 0; i < n; i++) {
if ((i - 0) >= 0) {
h_sol[i] += 2.809900e-01f * h_in[i - 0];
}
}
for (long int i = 1; i < n; i++) {
if ((i - 1) >= 0) {
h_sol[i] += 7.190100e-01f * h_sol[i - 1];
}
}
cudaSetDevice(device);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
const int SMs = deviceProp.multiProcessorCount;
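// Increase values-per-thread (up to 9) until two resident chunks per SM cover the whole input.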
int valsperthread = 1;
while ((valsperthread < 9) && (block_size * 2 * SMs * valsperthread < n)) {
valsperthread++;
}
const int chunk_size = valsperthread * block_size;
// const int iterations = 5;
assert(cudaSuccess == cudaMalloc(&d_in, size));
assert(cudaSuccess == cudaMalloc(&d_out, size));
assert(cudaSuccess == cudaMalloc(&d_status, (n + chunk_size - 1) / chunk_size * sizeof(int)));
assert(cudaSuccess == cudaMalloc(&d_partcarry, (n + chunk_size - 1) / chunk_size * order * sizeof(T)));
assert(cudaSuccess == cudaMalloc(&d_fullcarry, (n + chunk_size - 1) / chunk_size * order * sizeof(T)));
assert(cudaSuccess == cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(d_out, d_in, size, cudaMemcpyDeviceToDevice));
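// Untimed warm-up run of the selected kernel before the timed loop below.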
cudaMemset(d_status, 0, (n + chunk_size - 1) / chunk_size * sizeof(int));
switch (valsperthread) {
case 1: Recurrence1<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 2: Recurrence2<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 3: Recurrence3<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 4: Recurrence4<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 5: Recurrence5<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 6: Recurrence6<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 7: Recurrence7<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 8: Recurrence8<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 9: Recurrence9<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
}
GPUTimer timer;
timer.start();
// simulating fwd+rev by doubling iterations
for (long i = 0; i < 2*iterations; i++) {
cudaMemset(d_status, 0, (n + chunk_size - 1) / chunk_size * sizeof(int));
switch (valsperthread) {
case 1: Recurrence1<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 2: Recurrence2<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 3: Recurrence3<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 4: Recurrence4<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 5: Recurrence5<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 6: Recurrence6<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 7: Recurrence7<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 8: Recurrence8<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
case 9: Recurrence9<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
}
}
double runtime = timer.stop() / iterations;
double throughput = 0.000000001 * n / runtime;
assert(cudaSuccess == cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost));
/*
for (long int i = 0; i < n; i++) {
T s = h_sol[i];
T o = h_out[i];
if (fabsf(o - s) > 0.001) {
printf("result not correct at index %d: %e != %e\n", i, h_sol[i], h_out[i]);
return -1;
}
}
printf("size = %d\tthroughput = %7.4f gigaitems/s\truntime = %7.4f s\tPassed!\n", n, throughput, runtime);
printf("first elements of result are:\n");
for (int i = 0; (i < 8) && (i < n); i++) {
printf(" %f", h_out[i]);
}
printf("\n");
*/
double mebissec = n / (runtime*1024*1024); // Mis/s
float max_abs_err, max_rel_err;
check_cpu_reference(h_sol, h_out, n, max_abs_err, max_rel_err);
printf("%7.7f %e %e\n", mebissec, max_abs_err, max_rel_err);
free(h_in); free(h_out); free(h_sol);
cudaFree(d_in); cudaFree(d_out); cudaFree(d_status); cudaFree(d_partcarry); cudaFree(d_fullcarry);
return 0;
}
|
f9e12755eca83a7d8e2715109251035355a2dce4.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by Jacob Austin on 5/17/18.
//
#define GLM_FORCE_PURE
#include "mass.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=false)
{
if (code != hipSuccess)
{
//fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) {
char buffer[200];
snprintf(buffer, sizeof(buffer), "GPUassert error in CUDA kernel: %s %s %d\n", hipGetErrorString(code), file, line);
std::string buffer_string = buffer;
throw std::runtime_error(buffer_string);
exit(code);
}
}
}
Mass::Mass() {
m = 1.0;
dt = 0.0001;
damping = 1.0;
T = 0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
} // constructor TODO fix timing
void Mass::operator=(CUDA_MASS & mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = mass.valid;
ref_count = this -> ref_count;
arrayptr = this -> arrayptr;
neighbors.clear();
for (unsigned i=0;i<mass.num_neighbors;i++) {
neighbors.push_back(mass.arrayptr->neighbors[i]);
}
#ifdef CONSTRAINTS
constraints = this -> constraints;
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
Mass::Mass(const Vec & position, double mass, bool fixed, double dt) {
m = mass;
pos = position;
this -> dt = dt;
T = 0;
damping = 1.0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
}
CUDA_MASS::CUDA_MASS(Mass &mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = true;
arrayptr = &mass;
//printf("CUDA: %p ->Mass: %p\n", this, arrayptr);
CUDA_MASS ** temp = new CUDA_MASS * [mass.neighbors.size()];
for (unsigned i=0;i<mass.neighbors.size();i++) {
temp[i] = mass.neighbors[i]->arrayptr;
}
gpuErrchk(hipMalloc((void **) &neighbors, sizeof(CUDA_MASS *) * mass.neighbors.size()));
gpuErrchk(hipMemcpy(neighbors, temp, mass.neighbors.size() * sizeof(CUDA_MASS *), hipMemcpyHostToDevice));
delete[] temp;
num_neighbors = mass.neighbors.size();
//printf("CUDA_MASS(mass): # of neighbors: %d. \n", num_neighbors);
// for (unsigned i=0;i<mass.neighbors.size();i++) {
// neighbors[i] = mass.neighbors[i]->arrayptr;
// printf("ptr: %p\n", mass.neighbors[i]->arrayptr);
// }
#ifdef CONSTRAINTS
constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints);
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
#ifdef CONSTRAINTS
void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient
if (type == 0) {
this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num));
this -> constraints.num_constraint_planes++;
this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data());
} else if (type == 1) {
this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num));
this -> constraints.num_contact_planes++;
this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data());
} else if (type == 2) {
this -> constraints.ball.push_back(CudaBall(vec, num));
this -> constraints.num_balls++;
this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data());
} else if (type == 3) {
this -> constraints.direction.push_back(CudaDirection(vec, num));
this -> constraints.num_directions++;
this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data());
}
}
void Mass::clearConstraints(CONSTRAINT_TYPE type) {
if (type == 0) {
this -> constraints.constraint_plane.clear();
this -> constraints.constraint_plane.shrink_to_fit();
this -> constraints.num_constraint_planes = 0;
} else if (type == 1) {
this -> constraints.contact_plane.clear();
this -> constraints.contact_plane.shrink_to_fit();
this -> constraints.num_contact_planes = 0;
} else if (type == 2) {
this -> constraints.ball.clear();
this -> constraints.ball.shrink_to_fit();
this -> constraints.num_balls = 0;
} else if (type == 3) {
this -> constraints.direction.clear();
this -> constraints.direction.shrink_to_fit();
this -> constraints.num_directions = 0;
}
}
void Mass::clearConstraints() {
clearConstraints(CONSTRAINT_PLANE);
clearConstraints(CONTACT_PLANE);
clearConstraints(DIRECTION);
clearConstraints(BALL);
}
void Mass::fix() {
this -> constraints.fixed = true;
}
void Mass::unfix() {
this -> constraints.fixed = false;
}
void Mass::setDrag(double C) {
this -> constraints.drag_coefficient = C;
}
#endif
void Mass::decrementRefCount() {
if (--ref_count == 0) {
if (arrayptr) {
hipFree(arrayptr);
}
delete this;
}
}
|
f9e12755eca83a7d8e2715109251035355a2dce4.cu
|
//
// Created by Jacob Austin on 5/17/18.
//
#define GLM_FORCE_PURE
#include "mass.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=false)
{
if (code != cudaSuccess)
{
//fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) {
char buffer[200];
snprintf(buffer, sizeof(buffer), "GPUassert error in CUDA kernel: %s %s %d\n", cudaGetErrorString(code), file, line);
std::string buffer_string = buffer;
throw std::runtime_error(buffer_string);
exit(code);
}
}
}
Mass::Mass() {
m = 1.0;
dt = 0.0001;
damping = 1.0;
T = 0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
} // constructor TODO fix timing
void Mass::operator=(CUDA_MASS & mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = mass.valid;
ref_count = this -> ref_count;
arrayptr = this -> arrayptr;
neighbors.clear();
for (unsigned i=0;i<mass.num_neighbors;i++) {
neighbors.push_back(mass.arrayptr->neighbors[i]);
}
#ifdef CONSTRAINTS
constraints = this -> constraints;
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
Mass::Mass(const Vec & position, double mass, bool fixed, double dt) {
m = mass;
pos = position;
this -> dt = dt;
T = 0;
damping = 1.0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
}
CUDA_MASS::CUDA_MASS(Mass &mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = true;
arrayptr = &mass;
//printf("CUDA: %p ->Mass: %p\n", this, arrayptr);
CUDA_MASS ** temp = new CUDA_MASS * [mass.neighbors.size()];
for (unsigned i=0;i<mass.neighbors.size();i++) {
temp[i] = mass.neighbors[i]->arrayptr;
}
gpuErrchk(cudaMalloc((void **) &neighbors, sizeof(CUDA_MASS *) * mass.neighbors.size()));
gpuErrchk(cudaMemcpy(neighbors, temp, mass.neighbors.size() * sizeof(CUDA_MASS *), cudaMemcpyHostToDevice));
delete[] temp;
num_neighbors = mass.neighbors.size();
//printf("CUDA_MASS(mass): # of neighbors: %d. \n", num_neighbors);
// for (unsigned i=0;i<mass.neighbors.size();i++) {
// neighbors[i] = mass.neighbors[i]->arrayptr;
// printf("ptr: %p\n", mass.neighbors[i]->arrayptr);
// }
#ifdef CONSTRAINTS
constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints);
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
#ifdef CONSTRAINTS
void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient
if (type == 0) {
this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num));
this -> constraints.num_constraint_planes++;
this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data());
} else if (type == 1) {
this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num));
this -> constraints.num_contact_planes++;
this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data());
} else if (type == 2) {
this -> constraints.ball.push_back(CudaBall(vec, num));
this -> constraints.num_balls++;
this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data());
} else if (type == 3) {
this -> constraints.direction.push_back(CudaDirection(vec, num));
this -> constraints.num_directions++;
this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data());
}
}
void Mass::clearConstraints(CONSTRAINT_TYPE type) {
if (type == 0) {
this -> constraints.constraint_plane.clear();
this -> constraints.constraint_plane.shrink_to_fit();
this -> constraints.num_constraint_planes = 0;
} else if (type == 1) {
this -> constraints.contact_plane.clear();
this -> constraints.contact_plane.shrink_to_fit();
this -> constraints.num_contact_planes = 0;
} else if (type == 2) {
this -> constraints.ball.clear();
this -> constraints.ball.shrink_to_fit();
this -> constraints.num_balls = 0;
} else if (type == 3) {
this -> constraints.direction.clear();
this -> constraints.direction.shrink_to_fit();
this -> constraints.num_directions = 0;
}
}
void Mass::clearConstraints() {
clearConstraints(CONSTRAINT_PLANE);
clearConstraints(CONTACT_PLANE);
clearConstraints(DIRECTION);
clearConstraints(BALL);
}
void Mass::fix() {
this -> constraints.fixed = true;
}
void Mass::unfix() {
this -> constraints.fixed = false;
}
void Mass::setDrag(double C) {
this -> constraints.drag_coefficient = C;
}
#endif
void Mass::decrementRefCount() {
if (--ref_count == 0) {
if (arrayptr) {
cudaFree(arrayptr);
}
delete this;
}
}
|
d6115a7e34b20625df4503fb21d4108f976c39d7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/general_buffer2.hpp"
#include "HugeCTR/include/optimizers/momentum_sgd_optimizer.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
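// One thread per element: m = momentum_factor * m - lr * g / scaler, then w += m.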
template <typename T>
__global__ void momentum_sgd_update_kernel(int len, float* weight, T* momentum, const T* wgrad,
float lr, float momentum_factor, float scaler) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < len) {
float mv = momentum_factor * TypeConvertFunc<float, T>::convert(momentum[idx]) -
lr * TypeConvertFunc<float, T>::convert(wgrad[idx]) / scaler;
momentum[idx] = TypeConvertFunc<T, float>::convert(mv);
weight[idx] += mv;
}
return;
}
} // namespace
template <typename T>
MomentumSGDOptimizer<T>::MomentumSGDOptimizer(
const Tensor2<float>& weight, const Tensor2<T>& wgrad,
const std::shared_ptr<BufferBlock2<T>>& opt_buf,
const std::shared_ptr<GPUResource>& gpu_resource, float learning_rate, float momentum_factor,
float scaler)
: Optimizer(weight, gpu_resource, learning_rate,
scaler),
wgrad_(wgrad),
momentum_factor_(momentum_factor) {
if(weight_main_.get_num_elements() != wgrad_.get_num_elements()) {
CK_THROW_(Error_t::WrongInput,
"weight->get_num_elements() != wgrad->get_num_elements()");
}
opt_buf->reserve({weight.get_num_elements()}, &momentum_);
}
template <typename T>
void MomentumSGDOptimizer<T>::initialize() {
CK_CUDA_THROW_(hipMemsetAsync(momentum_.get_ptr(), 0, momentum_.get_size_in_bytes(),
gpu_resource_->get_stream()));
}
template <typename T>
void MomentumSGDOptimizer<T>::update() {
CudaDeviceContext context(get_device_id());
const size_t len = weight_main_.get_num_elements();
constexpr size_t block_dim = 256;
const size_t grid_dim = (len - 1) / block_dim + 1;
float* weight = weight_main_.get_ptr();
T *momentum = momentum_.get_ptr();
T *wgrad = wgrad_.get_ptr();
hipLaunchKernelGGL(( momentum_sgd_update_kernel), dim3(grid_dim), dim3(block_dim), 0, gpu_resource_->get_stream(),
len, weight, momentum, wgrad, lr_, momentum_factor_, scaler_);
#ifndef NDEBUG
CK_CUDA_THROW_(hipDeviceSynchronize());
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template class MomentumSGDOptimizer<float>;
template class MomentumSGDOptimizer<__half>;
} // namespace HugeCTR
|
d6115a7e34b20625df4503fb21d4108f976c39d7.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/general_buffer2.hpp"
#include "HugeCTR/include/optimizers/momentum_sgd_optimizer.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
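// One thread per element: m = momentum_factor * m - lr * g / scaler, then w += m.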
template <typename T>
__global__ void momentum_sgd_update_kernel(int len, float* weight, T* momentum, const T* wgrad,
float lr, float momentum_factor, float scaler) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < len) {
float mv = momentum_factor * TypeConvertFunc<float, T>::convert(momentum[idx]) -
lr * TypeConvertFunc<float, T>::convert(wgrad[idx]) / scaler;
momentum[idx] = TypeConvertFunc<T, float>::convert(mv);
weight[idx] += mv;
}
return;
}
} // namespace
template <typename T>
MomentumSGDOptimizer<T>::MomentumSGDOptimizer(
const Tensor2<float>& weight, const Tensor2<T>& wgrad,
const std::shared_ptr<BufferBlock2<T>>& opt_buf,
const std::shared_ptr<GPUResource>& gpu_resource, float learning_rate, float momentum_factor,
float scaler)
: Optimizer(weight, gpu_resource, learning_rate,
scaler),
wgrad_(wgrad),
momentum_factor_(momentum_factor) {
if(weight_main_.get_num_elements() != wgrad_.get_num_elements()) {
CK_THROW_(Error_t::WrongInput,
"weight->get_num_elements() != wgrad->get_num_elements()");
}
opt_buf->reserve({weight.get_num_elements()}, &momentum_);
}
template <typename T>
void MomentumSGDOptimizer<T>::initialize() {
CK_CUDA_THROW_(cudaMemsetAsync(momentum_.get_ptr(), 0, momentum_.get_size_in_bytes(),
gpu_resource_->get_stream()));
}
template <typename T>
void MomentumSGDOptimizer<T>::update() {
CudaDeviceContext context(get_device_id());
const size_t len = weight_main_.get_num_elements();
constexpr size_t block_dim = 256;
const size_t grid_dim = (len - 1) / block_dim + 1;
float* weight = weight_main_.get_ptr();
T *momentum = momentum_.get_ptr();
T *wgrad = wgrad_.get_ptr();
momentum_sgd_update_kernel<<<grid_dim, block_dim, 0, gpu_resource_->get_stream()>>>(
len, weight, momentum, wgrad, lr_, momentum_factor_, scaler_);
#ifndef NDEBUG
CK_CUDA_THROW_(cudaDeviceSynchronize());
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template class MomentumSGDOptimizer<float>;
template class MomentumSGDOptimizer<__half>;
} // namespace HugeCTR
|