hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
626266dd36f3cb855c4e896ed9cb27a1f763ab24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <time.h>
#include "hip/hip_fp16.h"
#define L1_SIZE 65536
#define FP_TYPE float
#define FP_DEV_TYPE float
/* Kernel for vector addition */
__global__ void Vec_add(FP_DEV_TYPE x[], FP_DEV_TYPE y[], FP_DEV_TYPE z[], int n, FP_DEV_TYPE lookup[], uint32_t startClk[], uint32_t stopClk[]) {
/* blockDim.x = threads_per_block */
/* First block gets first threads_per_block components. */
/* Second block gets next threads_per_block components, etc. */
int tid = blockDim.x * blockIdx.x + threadIdx.x;
/* block_count*threads_per_block may be >= n */
// a register to avoid compiler optimization
float sink = 0;
if (tid < n) {
float temp = x[tid];
// synchronize all threads
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// doing computation
sink = __expf(temp);
// synchronize all threads
//asm volatile ("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[tid] = start;
stopClk[tid] = stop;
// dsink[tid] = sink;
z[tid] = sink;
}
} /* Vec_add */
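/* Portability note (sketch, not part of the original benchmark): the inline
   PTX above ("bar.sync 0", "%%clock") is NVIDIA-specific and is left untouched
   by hipify, so this kernel only builds for NVIDIA targets. Assuming HIP's
   __syncthreads() and clock64() device functions, an equivalent timed region
   could look like the variant below; the host code that follows does not call it. */
__global__ void Vec_add_portable(FP_DEV_TYPE x[], FP_DEV_TYPE z[], int n,
                                 uint32_t startClk[], uint32_t stopClk[]) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < n) {
        float temp = x[tid];
        __syncthreads();                 /* same role as "bar.sync 0" */
        long long start = clock64();     /* cycle counter before the op */
        float sink = __expf(temp);       /* the operation being timed */
        long long stop = clock64();      /* cycle counter after the op */
        startClk[tid] = (uint32_t)start; /* truncated to match the uint32_t buffers */
        stopClk[tid]  = (uint32_t)stop;
        z[tid] = sink;                   /* keep the result live */
    }
}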
/* Host code */
int main(int argc, char* argv[]) {
int n, i;
FP_TYPE *h_x, *h_y, *h_z, *h_lookup;
FP_DEV_TYPE *d_x, *d_y, *d_z, *d_lookup ;
uint32_t *h_startClk, *h_stopClk;
uint32_t *d_startClk, *d_stopClk;
int threads_per_block;
int block_count;
size_t size, size_clock;
hipEvent_t start, stop;
float elapsedTime;
srand(time(0));
/* Get number of components in vector */
if (argc != 2) {
fprintf(stderr, "usage: %s <vector order>\n", argv[0]);
exit(0);
}
n = strtol(argv[1], NULL, 10); // half2 = 2x half , reduce size
size = n*sizeof(FP_TYPE);
size_clock = n*sizeof(uint32_t);
/* Allocate input vectors in host memory */
h_x = (FP_TYPE*) malloc(size);
h_y = (FP_TYPE*) malloc(size);
h_z = (FP_TYPE*) malloc(size);
h_startClk = (uint32_t*) malloc(size_clock);
h_stopClk = (uint32_t*) malloc(size_clock);
h_lookup = (FP_TYPE*) malloc(L1_SIZE*sizeof(FP_TYPE));
// declare and allocate memory
/* Initialize input vectors */
for (i = 0; i < n; i++) {
h_x[i] = 1.0/i;
// h_x[i] = rand()%L1_SIZE;
}
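/* Note: for i == 0 the assignment above stores 1.0/0.0, which is +inf under
   IEEE arithmetic, so thread 0 ends up timing __expf on a non-finite input.
   The clock-based measurement still runs; this is only worth knowing when
   inspecting h_z[0]. */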
for (i=0;i<L1_SIZE;i++)
h_lookup[i] = (i*5)%L1_SIZE;
/* Allocate vectors in device memory */
hipMalloc(&d_x, size);
hipMalloc(&d_y, size);
hipMalloc(&d_z, size);
hipMalloc(&d_lookup, L1_SIZE*sizeof(FP_TYPE));
hipMalloc(&d_stopClk, size_clock);
hipMalloc(&d_startClk, size_clock);
/* Copy vectors from host memory to device memory */
hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice);
hipMemcpy(d_y, h_y, size, hipMemcpyHostToDevice);
hipMemcpy(d_lookup, h_lookup, L1_SIZE*sizeof(FP_TYPE), hipMemcpyHostToDevice);
// hipMemcpy(buffer, h_buffer, MAX_TEXTURE_SIZE*sizeof(float), hipMemcpyHostToDevice); //copy data to texture
/* Define block size */
threads_per_block = 256;
block_count = (n + threads_per_block - 1)/threads_per_block;
hipEventCreate(&start);
hipEventRecord(start,0);
hipLaunchKernelGGL(( Vec_add), dim3(block_count), dim3(threads_per_block), 0, 0, d_x, d_y, d_z, n, d_lookup, d_startClk,d_stopClk);
hipDeviceSynchronize();
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
hipMemcpy(h_z, d_z, size, hipMemcpyDeviceToHost);
hipMemcpy(h_startClk, d_startClk, size_clock, hipMemcpyDeviceToHost);
hipMemcpy(h_stopClk, d_stopClk, size_clock, hipMemcpyDeviceToHost);
uint32_t sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
;
// printf("%d, \n", h_z[i]);
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n average latency (cycles) %f \n",float(sum)/n);
/* Free device memory */
hipFree(d_x);
hipFree(d_y);
hipFree(d_z);
/* Free host memory */
free(h_x);
free(h_y);
free(h_z);
return 0;
} /* main */
| 626266dd36f3cb855c4e896ed9cb27a1f763ab24.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <time.h>
#include "cuda_fp16.h"
#define L1_SIZE 65536
#define FP_TYPE float
#define FP_DEV_TYPE float
/* Kernel for vector addition */
__global__ void Vec_add(FP_DEV_TYPE x[], FP_DEV_TYPE y[], FP_DEV_TYPE z[], int n, FP_DEV_TYPE lookup[], uint32_t startClk[], uint32_t stopClk[]) {
/* blockDim.x = threads_per_block */
/* First block gets first threads_per_block components. */
/* Second block gets next threads_per_block components, etc. */
int tid = blockDim.x * blockIdx.x + threadIdx.x;
/* block_count*threads_per_block may be >= n */
// a register to avoid compiler optimization
float sink = 0;
if (tid < n) {
float temp = x[tid];
// synchronize all threads
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// doing computation
sink = __expf(temp);
// synchronize all threads
//asm volatile ("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[tid] = start;
stopClk[tid] = stop;
// dsink[tid] = sink;
z[tid] = sink;
}
} /* Vec_add */
/* Host code */
int main(int argc, char* argv[]) {
int n, i;
FP_TYPE *h_x, *h_y, *h_z, *h_lookup;
FP_DEV_TYPE *d_x, *d_y, *d_z, *d_lookup ;
uint32_t *h_startClk, *h_stopClk;
uint32_t *d_startClk, *d_stopClk;
int threads_per_block;
int block_count;
size_t size, size_clock;
cudaEvent_t start, stop;
float elapsedTime;
srand(time(0));
/* Get number of components in vector */
if (argc != 2) {
fprintf(stderr, "usage: %s <vector order>\n", argv[0]);
exit(0);
}
n = strtol(argv[1], NULL, 10); // half2 = 2x half , reduce size
size = n*sizeof(FP_TYPE);
size_clock = n*sizeof(uint32_t);
/* Allocate input vectors in host memory */
h_x = (FP_TYPE*) malloc(size);
h_y = (FP_TYPE*) malloc(size);
h_z = (FP_TYPE*) malloc(size);
h_startClk = (uint32_t*) malloc(size_clock);
h_stopClk = (uint32_t*) malloc(size_clock);
h_lookup = (FP_TYPE*) malloc(L1_SIZE*sizeof(FP_TYPE));
// declare and allocate memory
/* Initialize input vectors */
for (i = 0; i < n; i++) {
h_x[i] = 1.0/i;
// h_x[i] = rand()%L1_SIZE;
}
for (i=0;i<L1_SIZE;i++)
h_lookup[i] = (i*5)%L1_SIZE;
/* Allocate vectors in device memory */
cudaMalloc(&d_x, size);
cudaMalloc(&d_y, size);
cudaMalloc(&d_z, size);
cudaMalloc(&d_lookup, L1_SIZE*sizeof(FP_TYPE));
cudaMalloc(&d_stopClk, size_clock);
cudaMalloc(&d_startClk, size_clock);
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_lookup, h_lookup, L1_SIZE*sizeof(FP_TYPE), cudaMemcpyHostToDevice);
// cudaMemcpy(buffer, h_buffer, MAX_TEXTURE_SIZE*sizeof(float), cudaMemcpyHostToDevice); //copy data to texture
/* Define block size */
threads_per_block = 256;
block_count = (n + threads_per_block - 1)/threads_per_block;
cudaEventCreate(&start);
cudaEventRecord(start,0);
Vec_add<<<block_count, threads_per_block>>>(d_x, d_y, d_z, n, d_lookup, d_startClk,d_stopClk);
cudaThreadSynchronize();
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_startClk, d_startClk, size_clock, cudaMemcpyDeviceToHost);
cudaMemcpy(h_stopClk, d_stopClk, size_clock, cudaMemcpyDeviceToHost);
uint32_t sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
;
// printf("%d, \n", h_z[i]);
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n average latency (cycles) %f \n",float(sum)/n);
/* Free device memory */
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
/* Free host memory */
free(h_x);
free(h_y);
free(h_z);
return 0;
} /* main */
|
4303acc9b2776fe326121de12285f1793f6d6753.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_mul(char* newB, char* first, char* second, int size_first, int size_second, int * size_newB) {
int i = threadIdx.x;
int j = threadIdx.y;
int tid = j * gridDim.x * blockDim.x + i ;
if(j!=0 && i!=0){
newB[tid] = first[i] * second[j];
}
if(j==0 && i==0){
if(first[j] != second[i])
newB[0]='-';
else
newB[0]='+';
}
} | 4303acc9b2776fe326121de12285f1793f6d6753.cu | #include "includes.h"
__global__ void kernel_mul(char* newB, char* first, char* second, int size_first, int size_second, int * size_newB) {
int i = threadIdx.x;
int j = threadIdx.y;
int tid = j * gridDim.x * blockDim.x + i ;
if(j!=0 && i!=0){
newB[tid] = first[i] * second[j];
}
if(j==0 && i==0){
if(first[j] != second[i])
newB[0]='-';
else
newB[0]='+';
}
} |
9f51ddc722a3be6b38e5cfa58f42e02ae68e34f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<math.h>
#include<stdio.h>
#include "CUDA.h"
#include "Random.h"
#include "mycomplex.h"
#include "model.h"
using namespace std;
/* ----------------------------------------*/
/**************************/
double *yzero;
__device__ double const tau=1.;
__device__ double const one_over_tau=1./tau;
double const iniy_max=10.;
double const iniy_min=-10.;
__device__ inline double kappa(double zz){
double amp=0.0387;
double kzero=0.0017;
/* double zmax=100.;
double mu=0.1;
double zabs=abs(zz);
if (zabs > zmax){
return mu*zmax*zmax;
}
else{
return mu*zz*zz;
} */
return amp*sin(zz)*sin(zz)+kzero;
}
/* ----------------------------------------*/
__global__ void inirand_evolve(unsigned long long seed[]);
/* ----------------------------------------*/
/*__device__ double telegraph(double nu, double tt, int local_index, hiprandState_t mystate){
double poisson_mean=nu*tt;
double pr=Poisson(poisson_mean,&mystate);
int ppower=(int) fmod(pr,2.);
double tele_ran = powf(-1,ppower);
return tele_ran;
}*/
/* ----------------------------------------*/
__device__ void eval_rhs(double rhs[],double tt,double yy[],int lindex){
double vv=yy[lindex+1];
double zdot = vv;
/* --The stochastic part of the equation is added outside the usual integrator - */
double vdot = -one_over_tau*vv;
/* ---------------------------------------------------- */
rhs[0]=zdot;
rhs[1]=vdot;
}
/* ----------------------------------------*/
__device__ void stochastic(double yy[],hiprandState_t global_state[], double tlocal,
double deltat,int lindex)
{
double pi = 4.*atan(1.);
double zz=yy[lindex];
//double r = fmod(zz,pi);
double mean=0;
//double sigma=1.;
double sigma=kappa(zz);
int tid=lindex/pdim;
hiprandState_t local_state=global_state[tid];
double uu = Gaussian(mean,sigma,&local_state);
global_state[tid] = local_state;
yy[lindex+1]=yy[lindex+1]+one_over_tau*uu*sqrt(deltat);
}
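/* Reading eval_rhs() and stochastic() together (an interpretation of the code,
   not a statement from the original source): each step advances a Langevin-type
   SDE of the form
       dz = v dt
       dv = -(v / tau) dt + (1 / tau) * u * sqrt(dt),  u drawn from Gaussian(0, kappa(z)),
   where the deterministic part is handled by the regular integrator via
   eval_rhs() and the noise term is added here afterwards, which is why the
   comment above keeps it "outside the usual integrator". */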
/*---------------
__global__ void inirand_evolve(unsigned long long seed[], dev_global_state[]){
int tid = blockIdx.x;
unsigned long long local_seed = seed[tid];
hiprandState_t local_state;
local_state = dev_global_state[tid];
hiprand_init(local_seed,tid,0, &local_state);
dev_global_state[tid] = local_state;
}*/
/* ----------------------------------------*/
void iniconf(double y[],int Nensemble, hiprandState_t rand_state[]){
hiprandState_t *dev_iniran_state;
double rand[Nensemble],rand2[Nensemble];
double *dev_rand;
unsigned long long seed[Nensemble];
unsigned long long *dev_seed;
for(int i=0;i<Nensemble;i++){
seed[i]=37*i+53*i*i;
rand[i]=0.;
rand2[i]=0.;
}
dev_rand= host2dev(Nensemble,rand);
dev_seed = host2dev(Nensemble,seed);
hipMalloc( (void**)&dev_iniran_state, Nensemble*sizeof(hiprandState_t) );
hipLaunchKernelGGL(( init_random), dim3(Nensemble),dim3(1), 0, 0, dev_seed,dev_iniran_state);
hipLaunchKernelGGL(( UniformRandom), dim3(Nensemble),dim3(1), 0, 0, dev_rand, dev_iniran_state);
dev2host(rand,Nensemble,dev_rand);
hipLaunchKernelGGL(( UniformRandom), dim3(Nensemble),dim3(1), 0, 0, dev_rand, dev_iniran_state);
dev2host(rand2,Nensemble,dev_rand);
for(int j=0;j<Nensemble;j++){
// Uniformly distributed initial position between iniy_min and iniy_max
y[0+j*pdim]=iniy_min+rand[j]*(iniy_max-iniy_min);
// and random initial velocity
y[1+j*pdim]=rand2[j];
printf("y0,y1,%lf,%lf\n",y[0],y[1]);
}
/* copy the state of the random no. generator to host */
dev2host(rand_state,Nensemble,dev_iniran_state);
// inirand_evolve<<<Nensemble,1>>>(dev_seed, dev_rand_state);
}
/* ----------------------------------------*/
__host__ void diag(double tt, double y[], int Nensemble, FILE* tseries, FILE* diagf){
int ndim=pdim*Nensemble;
if (tt == 0.) {
yzero=(double*)malloc(ndim*sizeof(double));
for (int i=0;i<ndim;i++){
yzero[i]=y[i];
}
}
//printf("%lf\t%lf\t%lf\t%lf\t%lf\n",tt,y[0],y[1],y[2],y[3]);
fprintf(tseries,"%lf\t",tt);
for (int i=0;i<ndim-1;i++){
fprintf(tseries,"%lf\t",y[i]);
}
fprintf(tseries,"%lf\n",y[ndim-1]);
double meanz=0.;
double meanv=0.;
double dzrms=0;
for(int i=0; i<Nensemble; i++){
int lindex=pdim*i;
double zz=y[lindex];
double dz=y[lindex]-yzero[lindex];
double vv=y[lindex+1];
meanz= zz+meanz ;
dzrms= dz*dz+dzrms ;
meanv= vv+meanv ;
}
meanz=meanz/Nensemble;
meanv=meanv/Nensemble;
dzrms=sqrt(dzrms)/Nensemble;
printf("%lf\t%lf\t%lf\t%lf\n",tt,dzrms,meanz,meanv);
fprintf(diagf,"%lf\t%lf\t%lf\t%lf\n",tt,dzrms,meanz,meanv);
}
/* ----------------------------------------*/
| 9f51ddc722a3be6b38e5cfa58f42e02ae68e34f4.cu | #include<math.h>
#include<stdio.h>
#include "CUDA.h"
#include "Random.h"
#include "mycomplex.h"
#include "model.h"
using namespace std;
/* ----------------------------------------*/
/**************************/
double *yzero;
__device__ double const tau=1.;
__device__ double const one_over_tau=1./tau;
double const iniy_max=10.;
double const iniy_min=-10.;
__device__ inline double kappa(double zz){
double amp=0.0387;
double kzero=0.0017;
/* double zmax=100.;
double mu=0.1;
double zabs=abs(zz);
if (zabs > zmax){
return mu*zmax*zmax;
}
else{
return mu*zz*zz;
} */
return amp*sin(zz)*sin(zz)+kzero;
}
/* ----------------------------------------*/
__global__ void inirand_evolve(unsigned long long seed[]);
/* ----------------------------------------*/
/*__device__ double telegraph(double nu, double tt, int local_index, curandState mystate){
double poisson_mean=nu*tt;
double pr=Poisson(poisson_mean,&mystate);
int ppower=(int) fmod(pr,2.);
double tele_ran = powf(-1,ppower);
return tele_ran;
}*/
/* ----------------------------------------*/
__device__ void eval_rhs(double rhs[],double tt,double yy[],int lindex){
double vv=yy[lindex+1];
double zdot = vv;
/* --The stochastic part of the equation is added outside the usual integrator - */
double vdot = -one_over_tau*vv;
/* ---------------------------------------------------- */
rhs[0]=zdot;
rhs[1]=vdot;
}
/* ----------------------------------------*/
__device__ void stochastic(double yy[],curandState global_state[], double tlocal,
double deltat,int lindex)
{
double pi = 4.*atan(1.);
double zz=yy[lindex];
//double r = fmod(zz,pi);
double mean=0;
//double sigma=1.;
double sigma=kappa(zz);
int tid=lindex/pdim;
curandState local_state=global_state[tid];
double uu = Gaussian(mean,sigma,&local_state);
global_state[tid] = local_state;
yy[lindex+1]=yy[lindex+1]+one_over_tau*uu*sqrt(deltat);
}
/*---------------
__global__ void inirand_evolve(unsigned long long seed[], dev_global_state[]){
int tid = blockIdx.x;
unsigned long long local_seed = seed[tid];
curandState local_state;
local_state = dev_global_state[tid];
curand_init(local_seed,tid,0, &local_state);
dev_global_state[tid] = local_state;
}*/
/* ----------------------------------------*/
void iniconf(double y[],int Nensemble, curandState rand_state[]){
curandState *dev_iniran_state;
double rand[Nensemble],rand2[Nensemble];
double *dev_rand;
unsigned long long seed[Nensemble];
unsigned long long *dev_seed;
for(int i=0;i<Nensemble;i++){
seed[i]=37*i+53*i*i;
rand[i]=0.;
rand2[i]=0.;
}
dev_rand= host2dev(Nensemble,rand);
dev_seed = host2dev(Nensemble,seed);
cudaMalloc( (void**)&dev_iniran_state, Nensemble*sizeof(curandState) );
init_random<<<Nensemble,1>>>(dev_seed,dev_iniran_state);
UniformRandom<<<Nensemble,1>>>(dev_rand, dev_iniran_state);
dev2host(rand,Nensemble,dev_rand);
UniformRandom<<<Nensemble,1>>>(dev_rand, dev_iniran_state);
dev2host(rand2,Nensemble,dev_rand);
for(int j=0;j<Nensemble;j++){
// Uniformly distributed initial position between iniy_min and iniy_max
y[0+j*pdim]=iniy_min+rand[j]*(iniy_max-iniy_min);
// and random initial velocity
y[1+j*pdim]=rand2[j];
printf("y0,y1,%lf,%lf\n",y[0],y[1]);
}
/* copy the state of the random no. generator to host */
dev2host(rand_state,Nensemble,dev_iniran_state);
// inirand_evolve<<<Nensemble,1>>>(dev_seed, dev_rand_state);
}
/* ----------------------------------------*/
__host__ void diag(double tt, double y[], int Nensemble, FILE* tseries, FILE* diagf){
int ndim=pdim*Nensemble;
if (tt == 0.) {
yzero=(double*)malloc(ndim*sizeof(double));
for (int i=0;i<ndim;i++){
yzero[i]=y[i];
}
}
//printf("%lf\t%lf\t%lf\t%lf\t%lf\n",tt,y[0],y[1],y[2],y[3]);
fprintf(tseries,"%lf\t",tt);
for (int i=0;i<ndim-1;i++){
fprintf(tseries,"%lf\t",y[i]);
}
fprintf(tseries,"%lf\n",y[ndim-1]);
double meanz=0.;
double meanv=0.;
double dzrms=0;
for(int i=0; i<Nensemble; i++){
int lindex=pdim*i;
double zz=y[lindex];
double dz=y[lindex]-yzero[lindex];
double vv=y[lindex+1];
meanz= zz+meanz ;
dzrms= dz*dz+dzrms ;
meanv= vv+meanv ;
}
meanz=meanz/Nensemble;
meanv=meanv/Nensemble;
dzrms=sqrt(dzrms)/Nensemble;
printf("%lf\t%lf\t%lf\t%lf\n",tt,dzrms,meanz,meanv);
fprintf(diagf,"%lf\t%lf\t%lf\t%lf\n",tt,dzrms,meanz,meanv);
}
/* ----------------------------------------*/
|
eeaca9ff123fb50e5299718bf17f908c1a0f23a7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "shMatMul_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int matrixSize = XSIZE*YSIZE;
float *matrixA = NULL;
hipMalloc(&matrixA, XSIZE*YSIZE);
float *matrixB = NULL;
hipMalloc(&matrixB, XSIZE*YSIZE);
float *matrixC = NULL;
hipMalloc(&matrixC, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
shMatMul_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, matrixSize,matrixA,matrixB,matrixC);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
shMatMul_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, matrixSize,matrixA,matrixB,matrixC);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
shMatMul_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, matrixSize,matrixA,matrixB,matrixC);
}
auto end = steady_clock::now();
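// Note: kernel launches are asynchronous, so "end" above can be captured while
// some of the 1000 launches are still in flight, and the reported time then
// reflects enqueue overhead as much as kernel execution. Inserting a
// hipDeviceSynchronize() immediately before steady_clock::now() would make the
// interval cover only completed work.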
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | eeaca9ff123fb50e5299718bf17f908c1a0f23a7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "shMatMul_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int matrixSize = XSIZE*YSIZE;
float *matrixA = NULL;
cudaMalloc(&matrixA, XSIZE*YSIZE);
float *matrixB = NULL;
cudaMalloc(&matrixB, XSIZE*YSIZE);
float *matrixC = NULL;
cudaMalloc(&matrixC, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
shMatMul_Kernel<<<gridBlock,threadBlock>>>(matrixSize,matrixA,matrixB,matrixC);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
shMatMul_Kernel<<<gridBlock,threadBlock>>>(matrixSize,matrixA,matrixB,matrixC);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
shMatMul_Kernel<<<gridBlock,threadBlock>>>(matrixSize,matrixA,matrixB,matrixC);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c37b3240a7fc9698530ce64f9f2f48bdb90c3dad.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include <thrust/execution_policy.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int * dev_iterationsPerPixel, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
int iter = dev_iterationsPerPixel[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static PathSegment * dev_paths2 = NULL;
static PathSegment * dev_pathBounce1Cache = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static ShadeableIntersection * dev_intersections2 = NULL;
static ShadeableIntersection * dev_itxnBounce1Cache = NULL;
// mesh data
static Tri * dev_meshTris = NULL;
static int* dev_meshStartIndices = NULL;
static int* dev_meshEndIndices = NULL;
static Tri * dev_bboxTris = NULL;
// adaptive sampling data
static int * dev_iterationsPerPixel = NULL;
// note variance has to be calculated for R, G, & B so we need a vec3
static glm::vec3 * dev_variancePerPixel = NULL;
static int numPixelsCulled = 0;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_paths2, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_pathBounce1Cache, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMalloc(&dev_intersections2, pixelcount * sizeof(ShadeableIntersection));
hipMalloc(&dev_itxnBounce1Cache, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// --- init adaptive sampling ---
hipMalloc(&dev_iterationsPerPixel, pixelcount * sizeof(int));
hipMemset(dev_iterationsPerPixel, 0, pixelcount * sizeof(int));
hipMalloc(&dev_variancePerPixel, pixelcount * sizeof(glm::vec3));
hipMemset(dev_variancePerPixel, 0, pixelcount * sizeof(glm::vec3));
// --- init mesh loading ---
// load the triangles of all meshes into one big array
int totalTris = 0;
int numMeshes = 0;
for (auto& g : hst_scene->geoms) {
if (g.type == MESH) {
totalTris += g.numTris;
numMeshes++;
}
}
// if there are any meshes in the scene, copy their triangle data to the device
if (numMeshes) {
hipMalloc(&dev_meshTris, totalTris * sizeof(Tri));
hipMalloc(&dev_meshStartIndices, numMeshes * sizeof(int));
hipMalloc(&dev_meshEndIndices, numMeshes * sizeof(int));
hipMalloc(&dev_bboxTris, 12 * numMeshes * sizeof(Tri));
// add the tris from all our geo
int startIndex = 0;
int meshNum = 0;
for (auto& g : hst_scene->geoms) {
if (g.type == MESH) {
// copy the tris from this geo, offset the
// start index for the next copy
hipMemcpy(dev_meshTris + startIndex,
g.tris + startIndex,
g.numTris * sizeof(Tri),
hipMemcpyHostToDevice);
// copy the start index for this mesh
hipMemcpy(dev_meshStartIndices + meshNum,
&startIndex,
sizeof(int),
hipMemcpyHostToDevice);
// incr the start index for the next mesh
startIndex += g.numTris;
// start index for the next mesh is the end index for
// this mesh
hipMemcpy(dev_meshEndIndices + meshNum,
&startIndex,
sizeof(int),
hipMemcpyHostToDevice);
// copy the bounding box tris
hipMemcpy(dev_bboxTris + 12 * meshNum,
g.boundingBox,
12 * sizeof(Tri),
hipMemcpyHostToDevice);
}
}
}
else {
// declare a (nearly) empty array just because it needs to exist
// for freeing/reference etc.
hipMalloc(&dev_meshTris, sizeof(Tri));
hipMalloc(&dev_meshStartIndices, numMeshes * sizeof(int));
hipMalloc(&dev_meshEndIndices, numMeshes * sizeof(int));
hipMalloc(&dev_bboxTris, sizeof(Tri));
}
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_paths2);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
hipFree(dev_intersections2);
hipFree(dev_itxnBounce1Cache);
hipFree(dev_pathBounce1Cache);
hipFree(dev_meshTris);
hipFree(dev_meshStartIndices);
hipFree(dev_meshEndIndices);
hipFree(dev_bboxTris);
hipFree(dev_iterationsPerPixel);
hipFree(dev_variancePerPixel);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
// Based on Pharr/Humphrey's Physically Based Rendering textbook, 2nd ed
// Sections 6.2.3 and 13.6
__device__ void concentricSampleDisk(thrust::default_random_engine& rng,
float* dx,
float* dy,
float apSize) {
thrust::uniform_real_distribution<float> un11(-1.0f * apSize, apSize);
float r;
float theta;
float sy = un11(rng);
float sx = un11(rng);
if (sx == 0 && sy == 0) {
*dx = 0;
*dy = 0;
return;
}
if (abs(sx) > abs(sy)) {
r = sx;
theta = (PI * sx) / (sx * 4.0f);
}
else {
r = sy;
theta = (PI / 2.0f) - ((PI * sx) / (sy * 4.0f));
}
float u1 = un11(rng);
float u2 = un11(rng);
r = sqrt(u1);
theta = 2.0f * PI * u2;
*dx = r * cos(theta);
*dy = r * sin(theta);
}
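/* Two observations about the sampler above, based on the code as written:
   (1) the r/theta values computed in the if/else branch are overwritten by the
       r = sqrt(u1), theta = 2*PI*u2 lines, so the function returns a plain
       polar disk sample rather than the concentric mapping it starts with;
   (2) u1 is drawn from (-apSize, apSize), so sqrt(u1) yields NaN for negative
       draws, which then propagates into the lens offsets. */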
// Based on Pharr/Humphrey's Physically Based Rendering textbook, 2nd ed
// Sections 6.2.3 and 13.6
__device__ void samplePointOnLens(thrust::default_random_engine rng,
float *lensU,
float *lensV,
float lensRadius,
float apSize) {
concentricSampleDisk(rng, lensU, lensV, apSize);
*lensU *= lensRadius;
*lensV *= lensRadius;
}
__global__ void generateRayFromCamera(Camera cam,
int iter,
int traceDepth,
PathSegment* pathSegments,
bool useDOF,
bool antialias)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
float aaShiftx = 0.0f;
float aaShifty = 0.0f;
if (antialias) {
thrust::uniform_real_distribution<float> u01(-0.5f, 0.5f);
aaShiftx = u01(rng);
aaShifty = u01(rng);
}
// calculate initial rays based on pin-hole camera
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x + aaShiftx - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y + aaShifty - (float)cam.resolution.y * 0.5f)
);
if (useDOF) {
// find the point on plane of focus, i.e. the plane on which all rays bent
// by the lens will converge
glm::vec3 pfocus = cam.position + segment.ray.direction * cam.focalDist;
// Offset the ray origins. Rather than all being from one point, they are now
// effectively cast from an aperture
float u, v;
samplePointOnLens(rng, &u, &v, cam.lensRadius, cam.aperture);
segment.ray.origin = cam.position + u * cam.right + v * cam.up;
// recalculate ray direction based on aperture/lens model. Ray now
// points to the point of focus
segment.ray.direction = glm::normalize(pfocus - segment.ray.origin);
}
// initialize other aspects of the path segment
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
//__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
//{
// int x = (blockIdx.x * blockDim.x) + threadIdx.x;
// int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//
// if (x < cam.resolution.x && y < cam.resolution.y) {
// int index = x + (y * cam.resolution.x);
// PathSegment & segment = pathSegments[index];
//
// segment.ray.origin = cam.position;
// segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
//
//
// segment.ray.direction = glm::normalize(cam.view
// - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
// - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
// );
//
// segment.pixelIndex = index;
// segment.remainingBounces = traceDepth;
// }
//}
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in the shader(s).
__global__ void computeIntersections(int depth,
int num_paths,
PathSegment * pathSegments,
Geom * geoms,
int geoms_size,
ShadeableIntersection * intersections,
Tri * dev_meshTris,
int * dev_meshStartIndices,
int * dev_meshEndIndices,
Tri * dev_bboxTris,
bool useBBox)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
int meshNum = 0;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == MESH) {
int numTris = dev_meshEndIndices[meshNum] - dev_meshStartIndices[meshNum];
t = meshIntersectionTest(geom,
pathSegment.ray,
tmp_intersect,
tmp_normal,
outside,
dev_meshTris + dev_meshStartIndices[meshNum],
numTris,
dev_bboxTris,
useBBox);
meshNum++;
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
pathSegments[path_index].color = glm::vec3(0);
pathSegments[path_index].remainingBounces = 0;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// shadeAllMaterial has conditionals for all BSDFs. It's inefficient, but it gets us started
__global__ void shadeAllMaterial (
int iter,
int num_paths,
ShadeableIntersection * shadeableIntersections,
PathSegment * pathSegments,
Material * materials){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces);
thrust::uniform_real_distribution<float> u01(0, 1);
PathSegment path = pathSegments[idx];
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0){// && pathSegments[idx].remainingBounces > 0) {
path.color *= (materialColor * material.emittance);
path.remainingBounces = 0;
}
else{// if (pathSegments[idx].remainingBounces > 0){
path.color *= materialColor;
scatterRay(path,
getPointOnRay(path.ray, intersection.t),
intersection.surfaceNormal,
material,
rng);
path.remainingBounces--;
}
pathSegments[idx] = path;
}
}
__device__ glm::vec3 devClampRGB(glm::vec3 col) {
glm::vec3 out;
#pragma unroll
for (int i = 0; i < 3; i++) {
out[i] = min(max(0.0f,col[i]), 255.0f);
}
return out;
}
// Add the current iteration's output to the overall image
//__global__ void finalGather(int nPaths,
// glm::vec3 * image,
// PathSegment * iterationPaths,
// int iter){
// int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//
//
// if (index < nPaths)
// {
// PathSegment iterationPath = iterationPaths[index];
// // yes we have to clamp here even though there is later clamping
// // otherwise reflective surfaces generate fireflies
// image[iterationPath.pixelIndex] += devClampRGB(iterationPath.color);
// }
//}
__global__ void finalGatherAndCalcCull(int nPaths,
glm::vec3 * image,
PathSegment * iterationPaths,
float iter,
int * dev_iterationsPerPixel,
glm::vec3 * dev_variancePerPixel,
int minSamples,
float pixelVariance){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
glm::vec3 newCol = devClampRGB(iterationPath.color);
glm::vec3 meanCol = image[iterationPath.pixelIndex];
glm::vec3 oldV = dev_variancePerPixel[iterationPath.pixelIndex];
// calculate variance. We know the previous mean and the previous variance,
// and a formula for the new variance after adding an element to the set can
// be found here:
// https://www.quora.com/Is-it-possible-to-calculate-variance-using-old-variance-and-a-new-value
// This is rolling so we need to be calculating it every iteration regardless
// of whether we're below minSamples or not
// p.s. we've cast iter to a float for this
glm::vec3 newV = ((iter - 1.0f) / iter) * (oldV + (meanCol - newCol) * (meanCol - newCol) / iter);
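// Spelled out, with n = iter, V = oldV, M = meanCol and x = newCol, the line
// above computes, per color channel:
//     V_new = ((n - 1) / n) * ( V + (M - x)^2 / n )
// i.e. the incremental variance update referenced in the link above.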
volatile float3 vvv = make_float3(newV.x, newV.y, newV.z);
dev_variancePerPixel[iterationPath.pixelIndex] = newV;
// where we set pixels to be culled
if (iter > minSamples) {
iterationPaths[index].shouldCull =
// if variance for R,G,&B are all under threshold, cull=true
glm::all(glm::lessThan(newV, glm::vec3(pixelVariance)));
}
volatile int foo = glm::all(glm::lessThan(newV, glm::vec3(pixelVariance)));
// yes we have to clamp here even though there is later clamping
// otherwise reflective surfaces generate fireflies
image[iterationPath.pixelIndex] += newCol;
// track the number of iterations per pixel. Not only do we need it
// to calculate the average color per pixel over time, but we can make
// informative images from it
dev_iterationsPerPixel[iterationPath.pixelIndex] += 1;
}
}
// predicate for culling paths based on bounce depth
struct hasBounces{
__device__ bool operator()(const PathSegment &path){
return (path.remainingBounces > 0);
}
};
__global__ void kernScatterPathsAndIntersections(int n,
PathSegment *paths,
const PathSegment *pathsRO,
ShadeableIntersection *intersections,
const ShadeableIntersection *intersectionsRO,
const int *indices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n) {
return;
}
paths[index] = pathsRO[indices[index]];
intersections[index] = intersectionsRO[indices[index]];
}
__global__ void kernEnumerate(int n, int* indices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
indices[index] = index;
}
}
__global__ void kernGetMaterialIds(int n,
int* materialIds,
int* indices,
const ShadeableIntersection* dev_intersections) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
materialIds[index] = dev_intersections[indices[index]].materialId;
}
}
int cullPathsAndSortByMaterial(int num_paths,
const PathSegment* dev_pathsRO,
PathSegment* dev_pathsOut,
const ShadeableIntersection* dev_intersectionsRO,
ShadeableIntersection* dev_intersectionsOut,
const int blockSize1d) {
// cull and sort in one kernel to save on global reads/writes when
// rearranging paths/intersections
// --- Cull ---
int newNumPaths;
int* indices;
// iterator to the first culled path (one past the last surviving path)
int* partitionMiddle;
hipMalloc((void**)&indices, num_paths * sizeof(int));
int numBlocks = ceil((float)num_paths / blockSize1d);
hipLaunchKernelGGL(( kernEnumerate) , dim3(numBlocks), dim3(blockSize1d) , 0, 0, num_paths, indices);
// effectively sort indices based on whether an object was hit
partitionMiddle = thrust::stable_partition(thrust::device, indices, indices + num_paths, dev_pathsRO, hasBounces());
// do some pointer math to return the index
newNumPaths = partitionMiddle - indices;
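// thrust::stable_partition returns an iterator to the first element of the
// "false" group (here, paths with no bounces left), so subtracting the base
// pointer gives the count of still-active paths computed above.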
// --- Sort by Material ---
// now everything before newNumPaths has hit something. Sort those paths by their material
if (hst_scene->state.sortMaterials) {
int* materialIds;
hipMalloc((void**)&materialIds, newNumPaths * sizeof(int));
// get material ids. We need to pass indices since we haven't reshuffled intersections yet
numBlocks = ceil((float)newNumPaths / blockSize1d);
kernGetMaterialIds << <numBlocks, blockSize1d >> > (newNumPaths, materialIds, indices, dev_intersectionsRO);
thrust::sort_by_key(thrust::device, materialIds, materialIds + newNumPaths, indices);
hipFree(materialIds);
}
// assign paths/intersections to the sorted indices. now all paths/intersections before `newNumPaths` have hit an obj
// note we have to assign ALL paths and intersections (i.e. use `num_paths` not `newNumPaths`) because some paths wouldn't
// be assigned and/or would be overwritten
numBlocks = ceil((float)num_paths / blockSize1d);
kernScatterPathsAndIntersections << <numBlocks, blockSize1d >> > (num_paths,
dev_pathsOut,
dev_pathsRO,
dev_intersectionsOut,
dev_intersectionsRO,
indices);
//checkCUDAError("scatter");
hipFree(indices);
return newNumPaths;
}
// predicate for keeping pixels whose pre-calculated shouldCull flag is not set
struct shouldNotCull{
__device__ bool operator()(const PathSegment &path){
return !path.shouldCull;
}
};
//__global__ void kernGetPixelIds(int n,
// int* pixIds,
// const PathSegment* dev_pathsRO) {
// int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// if (index < n) {
// pixIds[index] = dev_pathsRO[index].pixelIndex;
// }
//}
int cullPixels(int num_paths,
PathSegment* dev_pathsRO,
PathSegment* dev_pathsOut,
const ShadeableIntersection* dev_intersectionsRO,
ShadeableIntersection* dev_intersectionsOut,
const int blockSize1d) {
// --- Cull ---
int newNumPaths;
int* indices;
int* pixIds;
// iterator to the first culled path (one past the last surviving path)
int* partitionMiddle;
hipMalloc((void**)&indices, num_paths * sizeof(int));
hipMalloc((void**)&pixIds, num_paths * sizeof(int));
int numBlocks = ceil((float)num_paths / blockSize1d);
hipLaunchKernelGGL(( kernEnumerate) , dim3(numBlocks), dim3(blockSize1d) , 0, 0, num_paths, indices);
//kernGetPixelIds << <numBlocks, blockSize1d >> > (num_paths, pixIds, dev_pathsRO);
//thrust::sort_by_key(thrust::device, pixIds, pixIds + num_paths, dev_pathsRO);
// effectively sort indices based on pre-calculated indicator of if we should cull
// everything that should NOT be culled is placed before everything that should be.
// Then partition middle is the index of the first cullable path
partitionMiddle = thrust::stable_partition(thrust::device,
indices,
indices + num_paths,
dev_pathsRO,
shouldNotCull());
// do some pointer math to return the index
newNumPaths = partitionMiddle - indices;
// assign paths/intersections to the sorted indices. now all paths/intersections before `newNumPaths` have hit an obj
// note we have to assign ALL paths and intersections (i.e. use `num_paths` not `newNumPaths`) because some paths wouldn't
// be assigned and/or would be overwritten
numBlocks = ceil((float)num_paths / blockSize1d);
kernScatterPathsAndIntersections << <numBlocks, blockSize1d >> > (num_paths,
dev_pathsOut,
dev_pathsRO,
dev_intersectionsOut,
dev_intersectionsRO,
indices);
//checkCUDAError("scatter");
hipFree(pixIds);
hipFree(indices);
return newNumPaths;
}
void shade(int iter,
int num_paths,
ShadeableIntersection* dev_intersections,
PathSegment* dev_paths,
Material* dev_materials,
dim3 numblocksPathSegmentTracing,
int blockSize1d) {
shadeAllMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials);
checkCUDAError("shade");
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
int pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
if (pixelcount == numPixelsCulled) {
fprintf(stdout, "All pixels have converged after %i iterations, exiting\n", iter);
return 1;
}
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount - numPixelsCulled;
int num_paths = dev_path_end - dev_paths;
int preBounceCullNumPaths = num_paths;
int prePixCullNumPaths = pixelcount;
PathSegment* pathSwp;
ShadeableIntersection* intxnSwp; // for ping ponging buffers
if (hst_scene->state.cacheFirstBounce && iter > 1) {
hipMemcpy(dev_intersections,
dev_itxnBounce1Cache,
num_paths * sizeof(ShadeableIntersection),
hipMemcpyDeviceToDevice);
checkCUDAError("copying itxn cache");
hipMemcpy(dev_paths,
dev_pathBounce1Cache,
num_paths * sizeof(PathSegment),
hipMemcpyDeviceToDevice);
checkCUDAError("copying path cache");
depth=1;
}
else {
// cast camera rays using either the DOF kernel or the pinhole kernel
hipLaunchKernelGGL(( generateRayFromCamera), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, cam,
iter,
traceDepth,
dev_paths,
hst_scene->state.useDOF,
hst_scene->state.antialias);
checkCUDAError("generate camera ray");
}
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, depth,
num_paths,
dev_paths,
dev_geoms,
hst_scene->geoms.size(),
dev_intersections,
dev_meshTris,
dev_meshStartIndices,
dev_meshEndIndices,
dev_bboxTris,
hst_scene->state.useBBox);
depth++;
// --- cull dead-end paths ---
num_paths = cullPathsAndSortByMaterial(preBounceCullNumPaths,
dev_paths,
dev_paths2,
dev_intersections,
dev_intersections2,
blockSize1d);
checkCUDAError("cull");
// ping-pong buffers after culling.
pathSwp = dev_paths;
dev_paths = dev_paths2;
dev_paths2 = pathSwp;
intxnSwp = dev_intersections;
dev_intersections = dev_intersections2;
dev_intersections2 = intxnSwp;
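// The swap above is what lets the cull avoid an extra copy:
// kernScatterPathsAndIntersections reads from the current (read-only) buffers
// and writes the compacted paths/intersections into the second pair, so after
// each cull the two buffer pairs simply exchange roles.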
if (iter == 1 && depth == 1 && hst_scene->state.cacheFirstBounce) {
hipMemcpy(dev_itxnBounce1Cache,
dev_intersections,
preBounceCullNumPaths * sizeof(ShadeableIntersection),
hipMemcpyDeviceToDevice);
checkCUDAError("reading itxn cache");
hipMemcpy(dev_pathBounce1Cache,
dev_paths,
preBounceCullNumPaths * sizeof(PathSegment),
hipMemcpyDeviceToDevice);
checkCUDAError("reading path cache");
}
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shade(iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
numblocksPathSegmentTracing,
blockSize1d);
if (depth >= traceDepth || num_paths == 0) {
iterationComplete = true;
}
}
checkCUDAError("somewhere in main loop");
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
//if (hst_scene->state.useAdaptiveSampling) {
hipLaunchKernelGGL(( finalGatherAndCalcCull), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, preBounceCullNumPaths,
dev_image,
dev_paths,
iter,
dev_iterationsPerPixel,
dev_variancePerPixel,
hst_scene->state.minSamples,
hst_scene->state.pixelVariance);
//}
//else {
// finalGather<<<numBlocksPixels, blockSize1d>>>(preBounceCullNumPaths, dev_image, dev_paths, iter);
//}
checkCUDAError("final gather");
// reset num_paths. We've culled some but want the full number next iteration
//num_paths = preBounceCullNumPaths;
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, dev_iterationsPerPixel, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("copying image");
hipMemcpy(hst_scene->state.heatMap.data(), dev_iterationsPerPixel,
pixelcount * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("copying heatmap");
// cull pixels for the next iteration
if (hst_scene->state.useAdaptiveSampling && iter > hst_scene->state.minSamples) {
int pixelCullCount = cullPixels(preBounceCullNumPaths,
dev_paths,
dev_paths2,
dev_intersections,
dev_intersections2,
blockSize1d);
// ping-pong buffers after culling.
pathSwp = dev_paths;
dev_paths = dev_paths2;
dev_paths2 = pathSwp;
intxnSwp = dev_intersections;
dev_intersections = dev_intersections2;
dev_intersections2 = intxnSwp;
checkCUDAError("culling pixels");
numPixelsCulled = pixelCullCount;
}
checkCUDAError("pathtrace");
return 0;
}
| c37b3240a7fc9698530ce64f9f2f48bdb90c3dad.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include <thrust/execution_policy.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int * dev_iterationsPerPixel, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
int iter = dev_iterationsPerPixel[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static PathSegment * dev_paths2 = NULL;
static PathSegment * dev_pathBounce1Cache = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static ShadeableIntersection * dev_intersections2 = NULL;
static ShadeableIntersection * dev_itxnBounce1Cache = NULL;
// mesh data
static Tri * dev_meshTris = NULL;
static int* dev_meshStartIndices = NULL;
static int* dev_meshEndIndices = NULL;
static Tri * dev_bboxTris = NULL;
// adaptive sampling data
static int * dev_iterationsPerPixel = NULL;
// note variance has to be calculated for R, G, & B so we need a vec3
static glm::vec3 * dev_variancePerPixel = NULL;
static int numPixelsCulled = 0;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_paths2, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_pathBounce1Cache, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMalloc(&dev_intersections2, pixelcount * sizeof(ShadeableIntersection));
cudaMalloc(&dev_itxnBounce1Cache, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// --- init adaptive sampling ---
cudaMalloc(&dev_iterationsPerPixel, pixelcount * sizeof(int));
cudaMemset(dev_iterationsPerPixel, 0, pixelcount * sizeof(int));
cudaMalloc(&dev_variancePerPixel, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_variancePerPixel, 0, pixelcount * sizeof(glm::vec3));
// --- init mesh loading ---
// load the triangles of all meshes into one big array
int totalTris = 0;
int numMeshes = 0;
for (auto& g : hst_scene->geoms) {
if (g.type == MESH) {
totalTris += g.numTris;
numMeshes++;
}
}
// if there are any meshes in the scene, copy their triangle data to the device
if (numMeshes) {
cudaMalloc(&dev_meshTris, totalTris * sizeof(Tri));
cudaMalloc(&dev_meshStartIndices, numMeshes * sizeof(int));
cudaMalloc(&dev_meshEndIndices, numMeshes * sizeof(int));
cudaMalloc(&dev_bboxTris, 12 * numMeshes * sizeof(Tri));
// add the tris from all our geo
int startIndex = 0;
int meshNum = 0;
for (auto& g : hst_scene->geoms) {
if (g.type == MESH) {
// copy the tris from this geo, offset the
// start index for the next copy
cudaMemcpy(dev_meshTris + startIndex,
g.tris + startIndex,
g.numTris * sizeof(Tri),
cudaMemcpyHostToDevice);
// copy the start index for this mesh
cudaMemcpy(dev_meshStartIndices + meshNum,
&startIndex,
sizeof(int),
cudaMemcpyHostToDevice);
// incr the start index for the next mesh
startIndex += g.numTris;
// start index for the next mesh is the end index for
// this mesh
cudaMemcpy(dev_meshEndIndices + meshNum,
&startIndex,
sizeof(int),
cudaMemcpyHostToDevice);
// copy the bounding box tris
cudaMemcpy(dev_bboxTris + 12 * meshNum,
g.boundingBox,
12 * sizeof(Tri),
cudaMemcpyHostToDevice);
}
}
}
else {
// declare a (nearly) empty array just because it needs to exist
// for freeing/reference etc.
cudaMalloc(&dev_meshTris, sizeof(Tri));
cudaMalloc(&dev_meshStartIndices, numMeshes * sizeof(int));
cudaMalloc(&dev_meshEndIndices, numMeshes * sizeof(int));
cudaMalloc(&dev_bboxTris, sizeof(Tri));
}
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_paths2);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
cudaFree(dev_intersections2);
cudaFree(dev_itxnBounce1Cache);
cudaFree(dev_pathBounce1Cache);
cudaFree(dev_meshTris);
cudaFree(dev_meshStartIndices);
cudaFree(dev_meshEndIndices);
cudaFree(dev_bboxTris);
cudaFree(dev_iterationsPerPixel);
cudaFree(dev_variancePerPixel);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
// Based on Pharr/Humphrey's Physically Based Rendering textbook, 2nd ed
// Sections 6.2.3 and 13.6
__device__ void concentricSampleDisk(thrust::default_random_engine& rng,
float* dx,
float* dy,
float apSize) {
thrust::uniform_real_distribution<float> un11(-1.0f * apSize, apSize);
float r;
float theta;
float sy = un11(rng);
float sx = un11(rng);
if (sx == 0 && sy == 0) {
*dx = 0;
*dy = 0;
return;
}
if (abs(sx) > abs(sy)) {
r = sx;
theta = (PI * sy) / (sx * 4.0f);
}
else {
r = sy;
theta = (PI / 2.0f) - ((PI * sx) / (sy * 4.0f));
}
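// Note: r and theta from the concentric mapping above are immediately overwritten
// below, so this effectively falls back to plain uniform disk sampling
// (r = sqrt(u1), theta = 2*pi*u2). Also note that u1 is drawn from [-apSize, apSize],
// so sqrt(u1) can be taken of a negative sample here.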
float u1 = un11(rng);
float u2 = un11(rng);
r = sqrt(u1);
theta = 2.0f * PI * u2;
*dx = r * cos(theta);
*dy = r * sin(theta);
}
// Based on Pharr/Humphrey's Physically Based Rendering textbook, 2nd ed
// Sections 6.2.3 and 13.6
__device__ void samplePointOnLens(thrust::default_random_engine rng,
float *lensU,
float *lensV,
float lensRadius,
float apSize) {
concentricSampleDisk(rng, lensU, lensV, apSize);
*lensU *= lensRadius;
*lensV *= lensRadius;
}
__global__ void generateRayFromCamera(Camera cam,
int iter,
int traceDepth,
PathSegment* pathSegments,
bool useDOF,
bool antialias)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
float aaShiftx = 0.0f;
float aaShifty = 0.0f;
if (antialias) {
thrust::uniform_real_distribution<float> u01(-0.5f, 0.5f);
aaShiftx = u01(rng);
aaShifty = u01(rng);
}
// calculate initial rays based on pin-hole camera
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x + aaShiftx - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y + aaShifty - (float)cam.resolution.y * 0.5f)
);
if (useDOF) {
// find the point on the plane of focus, i.e. the plane on which all rays bent
// by the lens will converge
glm::vec3 pfocus = cam.position + segment.ray.direction * cam.focalDist;
// Offset the ray origins. Rather than all being from one point, they are now
// effectively cast from an aperture
float u, v;
samplePointOnLens(rng, &u, &v, cam.lensRadius, cam.aperture);
segment.ray.origin = cam.position + u * cam.right + v * cam.up;
// recalculate ray direction based on aperture/lens model. Ray now
// points to the point of focus
segment.ray.direction = glm::normalize(pfocus - segment.ray.origin);
}
// initialize other aspects of the path segment
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
//__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
//{
// int x = (blockIdx.x * blockDim.x) + threadIdx.x;
// int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//
// if (x < cam.resolution.x && y < cam.resolution.y) {
// int index = x + (y * cam.resolution.x);
// PathSegment & segment = pathSegments[index];
//
// segment.ray.origin = cam.position;
// segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
//
//
// segment.ray.direction = glm::normalize(cam.view
// - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
// - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
// );
//
// segment.pixelIndex = index;
// segment.remainingBounces = traceDepth;
// }
//}
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in the shader(s).
__global__ void computeIntersections(int depth,
int num_paths,
PathSegment * pathSegments,
Geom * geoms,
int geoms_size,
ShadeableIntersection * intersections,
Tri * dev_meshTris,
int * dev_meshStartIndices,
int * dev_meshEndIndices,
Tri * dev_bboxTris,
bool useBBox)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
int meshNum = 0;
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == MESH) {
int numTris = dev_meshEndIndices[meshNum] - dev_meshStartIndices[meshNum];
t = meshIntersectionTest(geom,
pathSegment.ray,
tmp_intersect,
tmp_normal,
outside,
dev_meshTris + dev_meshStartIndices[meshNum],
numTris,
dev_bboxTris + 12 * meshNum, // bounding boxes are stored 12 tris per mesh
useBBox);
meshNum++;
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
pathSegments[path_index].color = glm::vec3(0);
pathSegments[path_index].remainingBounces = 0;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// allShader has conditionals for all BSDFs. It's inefficient, but it gets us started
__global__ void shadeAllMaterial (
int iter,
int num_paths,
ShadeableIntersection * shadeableIntersections,
PathSegment * pathSegments,
Material * materials){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces);
thrust::uniform_real_distribution<float> u01(0, 1);
PathSegment path = pathSegments[idx];
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0){// && pathSegments[idx].remainingBounces > 0) {
path.color *= (materialColor * material.emittance);
path.remainingBounces = 0;
}
else{// if (pathSegments[idx].remainingBounces > 0){
path.color *= materialColor;
scatterRay(path,
getPointOnRay(path.ray, intersection.t),
intersection.surfaceNormal,
material,
rng);
path.remainingBounces--;
}
pathSegments[idx] = path;
}
}
__device__ glm::vec3 devClampRGB(glm::vec3 col) {
glm::vec3 out;
#pragma unroll
for (int i = 0; i < 3; i++) {
out[i] = min(max(0.0f,col[i]), 255.0f);
}
return out;
}
// Add the current iteration's output to the overall image
//__global__ void finalGather(int nPaths,
// glm::vec3 * image,
// PathSegment * iterationPaths,
// int iter){
// int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//
//
// if (index < nPaths)
// {
// PathSegment iterationPath = iterationPaths[index];
// // yes we have to clamp here even though there is later clamping
// // otherwise reflective surfaces generate fireflies
// image[iterationPath.pixelIndex] += devClampRGB(iterationPath.color);
// }
//}
__global__ void finalGatherAndCalcCull(int nPaths,
glm::vec3 * image,
PathSegment * iterationPaths,
float iter,
int * dev_iterationsPerPixel,
glm::vec3 * dev_variancePerPixel,
int minSamples,
float pixelVariance){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
glm::vec3 newCol = devClampRGB(iterationPath.color);
glm::vec3 meanCol = image[iterationPath.pixelIndex];
glm::vec3 oldV = dev_variancePerPixel[iterationPath.pixelIndex];
// calculate variance. We know the previous mean and the previous variance,
// and a formula for the new variance after adding an element to the set can
// be found here:
// https://www.quora.com/Is-it-possible-to-calculate-variance-using-old-variance-and-a-new-value
// This is rolling so we need to be calculating it every iteration regardless
// of whether we're below minSamples or not
// p.s. we've cast iter to a float for this
glm::vec3 newV = ((iter - 1.0f) / iter) * (oldV + (meanCol - newCol) * (meanCol - newCol) / iter);
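// Explicitly, with n = iter samples so far, x_n = newCol and mean_{n-1} = meanCol,
// the line above applies the recurrence
// V_n = ((n - 1) / n) * (V_{n-1} + (x_n - mean_{n-1})^2 / n)
// independently to each color channel.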
volatile float3 vvv = make_float3(newV.x, newV.y, newV.z);
dev_variancePerPixel[iterationPath.pixelIndex] = newV;
// where we set pixels to be culled
if (iter > minSamples) {
iterationPaths[index].shouldCull =
// if variance for R,G,&B are all under threshold, cull=true
glm::all(glm::lessThan(newV, glm::vec3(pixelVariance)));
}
volatile int foo = glm::all(glm::lessThan(newV, glm::vec3(pixelVariance)));
// yes we have to clamp here even though there is later clamping
// otherwise reflective surfaces generate fireflies
image[iterationPath.pixelIndex] += newCol;
// track the number of iterations per pixel. Not only do we need it
// to calculate the average color per pixel over time, but we can make
// informative images from it
dev_iterationsPerPixel[iterationPath.pixelIndex] += 1;
}
}
// predicate for culling paths based on bounce depth
struct hasBounces{
__device__ bool operator()(const PathSegment &path){
return (path.remainingBounces > 0);
}
};
__global__ void kernScatterPathsAndIntersections(int n,
PathSegment *paths,
const PathSegment *pathsRO,
ShadeableIntersection *intersections,
const ShadeableIntersection *intersectionsRO,
const int *indices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n) {
return;
}
paths[index] = pathsRO[indices[index]];
intersections[index] = intersectionsRO[indices[index]];
}
__global__ void kernEnumerate(int n, int* indices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
indices[index] = index;
}
}
__global__ void kernGetMaterialIds(int n,
int* materialIds,
int* indices,
const ShadeableIntersection* dev_intersections) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
materialIds[index] = dev_intersections[indices[index]].materialId;
}
}
int cullPathsAndSortByMaterial(int num_paths,
const PathSegment* dev_pathsRO,
PathSegment* dev_pathsOut,
const ShadeableIntersection* dev_intersectionsRO,
ShadeableIntersection* dev_intersectionsOut,
const int blockSize1d) {
// cull and sort in one kernel to save on global reads/writes when
// rearranging paths/intersections
// --- Cull ---
int newNumPaths;
int* indices;
// points one past the last non-culled path
int* partitionMiddle;
cudaMalloc((void**)&indices, num_paths * sizeof(int));
int numBlocks = ceil((float)num_paths / blockSize1d);
kernEnumerate <<<numBlocks, blockSize1d >>> (num_paths, indices);
// effectively sort indices based on whether an object was hit
partitionMiddle = thrust::stable_partition(thrust::device, indices, indices + num_paths, dev_pathsRO, hasBounces());
// do some pointer math to return the index
newNumPaths = partitionMiddle - indices;
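// partitionMiddle points one past the last index whose path still has bounces remaining,
// so the pointer difference is the number of paths that survive the cull.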
// --- Sort by Material ---
// now everything before newNumPaths has hit something. Sort those paths by their material
if (hst_scene->state.sortMaterials) {
int* materialIds;
cudaMalloc((void**)&materialIds, newNumPaths * sizeof(int));
// get material ids. We need to pass indices since we haven't reshuffled intersections yet
numBlocks = ceil((float)newNumPaths / blockSize1d);
kernGetMaterialIds << <numBlocks, blockSize1d >> > (newNumPaths, materialIds, indices, dev_intersectionsRO);
thrust::sort_by_key(thrust::device, materialIds, materialIds + newNumPaths, indices);
cudaFree(materialIds);
}
// assign paths/intersections to the sorted indices. now all paths/intersections before `newNumPaths` have hit an obj
// note we have to assign ALL paths and intersections (i.e. use `num_paths` not `newNumPaths`) because some paths wouldn't
// be assigned and/or would be overwritten
numBlocks = ceil((float)num_paths / blockSize1d);
kernScatterPathsAndIntersections << <numBlocks, blockSize1d >> > (num_paths,
dev_pathsOut,
dev_pathsRO,
dev_intersectionsOut,
dev_intersectionsRO,
indices);
//checkCUDAError("scatter");
cudaFree(indices);
return newNumPaths;
}
// predicate for culling pixels based on pre-calculated array of ints
struct shouldNotCull{
__device__ bool operator()(const PathSegment &path){
return !path.shouldCull;
}
};
//__global__ void kernGetPixelIds(int n,
// int* pixIds,
// const PathSegment* dev_pathsRO) {
// int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// if (index < n) {
// pixIds[index] = dev_pathsRO[index].pixelIndex;
// }
//}
int cullPixels(int num_paths,
PathSegment* dev_pathsRO,
PathSegment* dev_pathsOut,
const ShadeableIntersection* dev_intersectionsRO,
ShadeableIntersection* dev_intersectionsOut,
const int blockSize1d) {
// --- Cull ---
int newNumPaths;
int* indices;
int* pixIds;
// points one past the last non-culled path
int* partitionMiddle;
cudaMalloc((void**)&indices, num_paths * sizeof(int));
cudaMalloc((void**)&pixIds, num_paths * sizeof(int));
int numBlocks = ceil((float)num_paths / blockSize1d);
kernEnumerate <<<numBlocks, blockSize1d >>> (num_paths, indices);
//kernGetPixelIds << <numBlocks, blockSize1d >> > (num_paths, pixIds, dev_pathsRO);
//thrust::sort_by_key(thrust::device, pixIds, pixIds + num_paths, dev_pathsRO);
// effectively sort indices based on pre-calculated indicator of if we should cull
// everything that should NOT be culled is placed before everything that should be.
// Then partition middle is the index of the first cullable path
partitionMiddle = thrust::stable_partition(thrust::device,
indices,
indices + num_paths,
dev_pathsRO,
shouldNotCull());
// do some pointer math to return the index
newNumPaths = partitionMiddle - indices;
// scatter paths/intersections into the partitioned order. Now everything before `newNumPaths`
// belongs to pixels that have NOT converged and will keep being sampled.
// note we still scatter ALL paths and intersections (i.e. use `num_paths` not `newNumPaths`)
// so that nothing is left unassigned or overwritten
numBlocks = ceil((float)num_paths / blockSize1d);
kernScatterPathsAndIntersections << <numBlocks, blockSize1d >> > (num_paths,
dev_pathsOut,
dev_pathsRO,
dev_intersectionsOut,
dev_intersectionsRO,
indices);
//checkCUDAError("scatter");
cudaFree(pixIds);
cudaFree(indices);
return newNumPaths;
}
void shade(int iter,
int num_paths,
ShadeableIntersection* dev_intersections,
PathSegment* dev_paths,
Material* dev_materials,
dim3 numblocksPathSegmentTracing,
int blockSize1d) {
shadeAllMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials);
checkCUDAError("shade");
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
int pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
if (pixelcount == numPixelsCulled) {
fprintf(stdout, "All pixels have converged after %i iterations, exiting\n", iter);
return 1;
}
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount - numPixelsCulled;
int num_paths = dev_path_end - dev_paths;
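// num_paths starts at the number of pixels that have not yet converged
// (the full pixel count minus the pixels culled by adaptive sampling)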
int preBounceCullNumPaths = num_paths;
int prePixCullNumPaths = pixelcount;
PathSegment* pathSwp;
ShadeableIntersection* intxnSwp; // for ping ponging buffers
if (hst_scene->state.cacheFirstBounce && iter > 1) {
cudaMemcpy(dev_intersections,
dev_itxnBounce1Cache,
num_paths * sizeof(ShadeableIntersection),
cudaMemcpyDeviceToDevice);
checkCUDAError("copying itxn cache");
cudaMemcpy(dev_paths,
dev_pathBounce1Cache,
num_paths * sizeof(PathSegment),
cudaMemcpyDeviceToDevice);
checkCUDAError("copying path cache");
depth=1;
}
else {
// cast camera rays using either the DOF kernel or the pinhole kernel
generateRayFromCamera<<<blocksPerGrid2d, blockSize2d>>>(cam,
iter,
traceDepth,
dev_paths,
hst_scene->state.useDOF,
hst_scene->state.antialias);
checkCUDAError("generate camera ray");
}
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>> (depth,
num_paths,
dev_paths,
dev_geoms,
hst_scene->geoms.size(),
dev_intersections,
dev_meshTris,
dev_meshStartIndices,
dev_meshEndIndices,
dev_bboxTris,
hst_scene->state.useBBox);
depth++;
// --- cull dead-end paths ---
num_paths = cullPathsAndSortByMaterial(preBounceCullNumPaths,
dev_paths,
dev_paths2,
dev_intersections,
dev_intersections2,
blockSize1d);
checkCUDAError("cull");
// ping-pong buffers after culling.
pathSwp = dev_paths;
dev_paths = dev_paths2;
dev_paths2 = pathSwp;
intxnSwp = dev_intersections;
dev_intersections = dev_intersections2;
dev_intersections2 = intxnSwp;
if (iter == 1 && depth == 1 && hst_scene->state.cacheFirstBounce) {
cudaMemcpy(dev_itxnBounce1Cache,
dev_intersections,
preBounceCullNumPaths * sizeof(ShadeableIntersection),
cudaMemcpyDeviceToDevice);
checkCUDAError("reading itxn cache");
cudaMemcpy(dev_pathBounce1Cache,
dev_paths,
preBounceCullNumPaths * sizeof(PathSegment),
cudaMemcpyDeviceToDevice);
checkCUDAError("reading path cache");
}
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shade(iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
numblocksPathSegmentTracing,
blockSize1d);
if (depth >= traceDepth || num_paths == 0) {
iterationComplete = true;
}
}
checkCUDAError("somewhere in main loop");
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
//if (hst_scene->state.useAdaptiveSampling) {
finalGatherAndCalcCull<<<numBlocksPixels, blockSize1d>>>(preBounceCullNumPaths,
dev_image,
dev_paths,
iter,
dev_iterationsPerPixel,
dev_variancePerPixel,
hst_scene->state.minSamples,
hst_scene->state.pixelVariance);
//}
//else {
// finalGather<<<numBlocksPixels, blockSize1d>>>(preBounceCullNumPaths, dev_image, dev_paths, iter);
//}
checkCUDAError("final gather");
// reset num_paths. We've culled some but want the full number next iteration
//num_paths = preBounceCullNumPaths;
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, dev_iterationsPerPixel, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("copying image");
cudaMemcpy(hst_scene->state.heatMap.data(), dev_iterationsPerPixel,
pixelcount * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("copying heatmap");
// cull pixels for the next iteration
if (hst_scene->state.useAdaptiveSampling && iter > hst_scene->state.minSamples) {
int pixelCullCount = cullPixels(preBounceCullNumPaths,
dev_paths,
dev_paths2,
dev_intersections,
dev_intersections2,
blockSize1d);
// ping-pong buffers after culling.
pathSwp = dev_paths;
dev_paths = dev_paths2;
dev_paths2 = pathSwp;
intxnSwp = dev_intersections;
dev_intersections = dev_intersections2;
dev_intersections2 = intxnSwp;
checkCUDAError("culling pixels");
numPixelsCulled = pixelCullCount;
}
checkCUDAError("pathtrace");
return 0;
}
|
305b69850ee68dfaa5a652ba3473fa227ee6f792.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LogOpGPU(const int nthreads,
const Dtype* in, Dtype* out, const Dtype eps)
{
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = log(max(in[index], eps));
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::compute_intermediate_values_of_gpu() {
// compute the corresponding variables
const int count = prob_.count();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* ones_data = ones_.gpu_data();
Dtype* log_prob_data = log_prob_.mutable_gpu_data();
Dtype* power_prob_data = power_prob_.mutable_gpu_data();
/// log(p_t)
const int nthreads = prob_.count();
const Dtype eps = Dtype(FLT_MIN); // where FLT_MIN = 1.17549e-38; you can change it here if needed
// more stable
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LogOpGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, log_prob_data, eps);
/// caffe_gpu_log(count, prob_data, log_prob_data);
/// (1 - p_t) ^ gamma
caffe_gpu_sub(count, ones_data, prob_data, power_prob_data);
caffe_gpu_powx(count, power_prob_.gpu_data(), gamma_, power_prob_data);
caffe_gpu_scal(count, alpha_, power_prob_data);
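// At this point, for every class probability p:
// log_prob_data holds log(max(p, eps)) and power_prob_data holds alpha * (1 - p)^gamma,
// i.e. the two factors of the focal loss FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t).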
}
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
const Dtype* label,
Dtype* loss,
const int num,
const int dim,
const int spatial_dim,
const bool has_ignore_label_,
const int ignore_label_,
Dtype* counts)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
int ind = n * dim + label_value * spatial_dim + s;
// loss[index] = -max(power_prob_data[ind] * log_prob_data[ind], Dtype(log(Dtype(FLT_MIN))));
loss[index] = -power_prob_data[ind] * log_prob_data[ind];
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
// compute all needed values
compute_intermediate_values_of_gpu();
// const Dtype* prob_data = prob_.gpu_data();
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, log_prob_data, power_prob_data,
label, loss_data,outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads,
const Dtype* top,
const Dtype* label,
const Dtype* prob_data,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
Dtype* bottom_diff,
const int num,
const int dim,
const int spatial_dim,
const Dtype gamma,
const bool has_ignore_label_,
const int ignore_label_,
const Dtype eps,
Dtype* counts)
{
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// the gradient from FL w.r.t p_t, here ignore the `sign`
int ind_i = n * dim + label_value * spatial_dim + s; // index of ground-truth label
Dtype grad = 0 - gamma * (power_prob_data[ind_i] / max(1 - prob_data[ind_i], eps))
* log_prob_data[ind_i] * prob_data[ind_i]
+ power_prob_data[ind_i];
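// Sketch of the math implemented here: with FL = -alpha * (1 - p_t)^gamma * log(p_t),
// `grad` above equals -p_t * dFL/dp_t
// = -alpha * gamma * (1 - p_t)^(gamma - 1) * p_t * log(p_t) + alpha * (1 - p_t)^gamma.
// Combined with the softmax Jacobian (dp_t/dx_j = p_t * (1 - p_t) if j == t, else -p_t * p_j),
// the assignments below reduce to bottom_diff[j] = dFL/dx_j.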
// the gradient w.r.t input data x
for (int c = 0; c < channels; ++c) {
int ind_j = n * dim + c * spatial_dim + s;
if(c == label_value) {
// if i == j (here i, j refer to the indices in the softmax derivative)
bottom_diff[ind_j] = grad * (prob_data[ind_i] - 1);
// bottom_diff[ind_j] = grad * (prob_data[ind_i] - 0.9); //Oscar add
} else {
// if i != j (here i, j refer to the indices in the softmax derivative)
bottom_diff[ind_j] = grad * prob_data[ind_j];
// bottom_diff[ind_j] = grad * (prob_data[ind_j] - 0.1); //Oscar add
}
}
// count
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype eps = 1e-10;
// intermidiate
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, prob_data, log_prob_data, power_prob_data,
bottom_diff, outer_num_, dim, inner_num_, gamma_, has_ignore_label_, ignore_label_, eps, counts);
// Only launch another CUDA kernel if we actually need the count of valid outputs.
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale gradient
const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
| 305b69850ee68dfaa5a652ba3473fa227ee6f792.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LogOpGPU(const int nthreads,
const Dtype* in, Dtype* out, const Dtype eps)
{
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = log(max(in[index], eps));
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::compute_intermediate_values_of_gpu() {
// compute the corresponding variables
const int count = prob_.count();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* ones_data = ones_.gpu_data();
Dtype* log_prob_data = log_prob_.mutable_gpu_data();
Dtype* power_prob_data = power_prob_.mutable_gpu_data();
/// log(p_t)
const int nthreads = prob_.count();
const Dtype eps = Dtype(FLT_MIN); // where FLT_MIN = 1.17549e-38; you can change it here if needed
// more stable
// NOLINT_NEXT_LINE(whitespace/operators)
LogOpGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, log_prob_data, eps);
/// caffe_gpu_log(count, prob_data, log_prob_data);
/// (1 - p_t) ^ gamma
caffe_gpu_sub(count, ones_data, prob_data, power_prob_data);
caffe_gpu_powx(count, power_prob_.gpu_data(), gamma_, power_prob_data);
caffe_gpu_scal(count, alpha_, power_prob_data);
}
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
const Dtype* label,
Dtype* loss,
const int num,
const int dim,
const int spatial_dim,
const bool has_ignore_label_,
const int ignore_label_,
Dtype* counts)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
int ind = n * dim + label_value * spatial_dim + s;
// loss[index] = -max(power_prob_data[ind] * log_prob_data[ind], Dtype(log(Dtype(FLT_MIN))));
loss[index] = -power_prob_data[ind] * log_prob_data[ind];
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
// compute all needed values
compute_intermediate_values_of_gpu();
// const Dtype* prob_data = prob_.gpu_data();
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, log_prob_data, power_prob_data,
label, loss_data,outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads,
const Dtype* top,
const Dtype* label,
const Dtype* prob_data,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
Dtype* bottom_diff,
const int num,
const int dim,
const int spatial_dim,
const Dtype gamma,
const bool has_ignore_label_,
const int ignore_label_,
const Dtype eps,
Dtype* counts)
{
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// the gradient from FL w.r.t p_t, here ignore the `sign`
int ind_i = n * dim + label_value * spatial_dim + s; // index of ground-truth label
Dtype grad = 0 - gamma * (power_prob_data[ind_i] / max(1 - prob_data[ind_i], eps))
* log_prob_data[ind_i] * prob_data[ind_i]
+ power_prob_data[ind_i];
// the gradient w.r.t input data x
for (int c = 0; c < channels; ++c) {
int ind_j = n * dim + c * spatial_dim + s;
if(c == label_value) {
// if i == j (here i, j refer to the indices in the softmax derivative)
bottom_diff[ind_j] = grad * (prob_data[ind_i] - 1);
// bottom_diff[ind_j] = grad * (prob_data[ind_i] - 0.9); //Oscar add
} else {
// if i != j (here i, j refer to the indices in the softmax derivative)
bottom_diff[ind_j] = grad * prob_data[ind_j];
// bottom_diff[ind_j] = grad * (prob_data[ind_j] - 0.1); //Oscar add
}
}
// count
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype eps = 1e-10;
// intermidiate
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, prob_data, log_prob_data, power_prob_data,
bottom_diff, outer_num_, dim, inner_num_, gamma_, has_ignore_label_, ignore_label_, eps, counts);
// Only launch another CUDA kernel if we actually need the count of valid outputs.
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale gradient
const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
e9774c195f3fdac10baf999af1d4b303c16ed27b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
 * This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
/***
 * Matrix product from files. This GPUfs example uses the original matmul from the CUDA SDK,
 * but reads/writes the data from/to files instead of memory.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* CUBLAS provides high-performance matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
 * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*
*/
// Utilities and system includes
#include <cmath>
#include <errno.h>
#include <rocblas.h>
#if 0
#include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h>
#include <shrUtils.h>
#else
// CUDA 7.5 helper functions.
// #include <helper_functions.h>
// #include <helper_cuda.h>
// #include "shrQATest.h"
#endif
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <gloop/gloop.h>
#define shrLog printf
static char *sSDKsample = "matrixMul";
void init_device_app(){
// CUDA_SAFE_CALL(hipSetDevice(global_devicenum));
CUDA_SAFE_CALL(hipDeviceSetLimit(hipLimitMallocHeapSize,1<<25));
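// 1 << 25 bytes = 32 MiB of device-side malloc heap (presumably for the in-kernel
// allocations made by the GPUfs/gloop device runtime)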
}
#include "matrixMul.h"
// includes, kernels
#include "matrixMul_kernel.hip"
#if 0
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( hipError_t err, const char *file, const int line )
{
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString( err ) );
// exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
// exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1); \
}
checkCudaErrors( hipSetDevice(devID) );
printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while ( current_device < device_count ) {
hipGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count ) {
hipGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf ) {
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 ) {
// If our device==dest_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
hipDeviceProp_t deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( hipSetDevice( devID ) );
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name);
}
return devID;
}
// end of CUDA Helper Functions
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void transpose(float*, float*, int,int);
void printDiff(float*, float*, int, int, int, float);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
void inline checkError(hipblasStatus_t status, const char* msg)
{
if(status != HIPBLAS_STATUS_SUCCESS){
printf("%s", msg);
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
shrQAStart(argc, argv);
printf("[ %s ]\n", sSDKsample);
//shrSetLogFileName ("matrixMul.txt");
shrLog("%s\n\tStarting (CUDA and CUBLAS tests)...\n\n", argv[0]);
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
int write_mtx(const char* name, void* data, size_t size,size_t offset=0, int doclose=1, int fd=0){
if(offset==0)
unlink(name);
if(offset==0)
fd=open(name,O_WRONLY|O_CREAT,S_IRWXU);
if (fd<0) {
perror("cant open mtx\n");
exit(-1);
}
if(pwrite(fd, data,size,offset)!=size) {
perror("cant write\n");
exit(-1);
}
//fsync(fd);
if (doclose) {
close(fd);
fd=0;
}
return fd;
}
#define START_CLOCK(var) {(var)=_timestamp();}
#define STOP_CLOCK(var) {(var)=_timestamp()-(var);}
#define CLOCK_IT(var,accumulator, proc) START_CLOCK(var); {proc;} STOP_CLOCK(var); accumulator+=var;
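// Example usage (hypothetical names): accumulate the wall time of one call into `total`:
// double t, total = 0;
// CLOCK_IT(t, total, hipDeviceSynchronize());
// _timestamp() appears to return microseconds, given the /1000 and /1e6 conversions below.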
bool runCUDA=true;
void runTest(int argc, char** argv)
{
// use a larger block size for Fermi and above
int block_size = 32;
// Optional Command-line multiplier for matrix sizes
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA;
uiHA = HA;
uiWB = WB;
char* c_HA=getenv("HA");
char* c_WA=getenv("WA");
char* c_WB=getenv("WB");
if (c_HA)
uiHA=HA*atoi(c_HA);
if (c_WA)
uiWA=WA*atoi(c_WA);
if (c_WB)
uiWB=WB*atoi(c_WB);
uiHB = uiWA;
uiWC = uiWB;
uiHC = uiHA;
shrLog("\nUsing Matrix Sizes: A(%u x %u), B(%u x %u), C(%u x %u)\n\n",
uiHA, uiWA, uiHB, uiWB, uiHC, uiWC);
#if 0
if(checkCmdLineFlag(argc, (const char**)argv, "device")) {
int devID = getCmdLineArgumentInt(argc, (const char **)argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
checkCudaErrors( hipSetDevice(gpuGetMaxGflopsDeviceId()) );
}
int devID;
hipDeviceProp_t props;
// get number of SMs on this GPU
checkCudaErrors(hipGetDevice(&devID));
checkCudaErrors(hipGetDeviceProperties(&props, devID));
hipSetDevice(0);
printf("OK?\n");
#endif
// setup execution parameters
dim3 threads(block_size, block_size);
// int perBlockX=1;
// int perBlockY=1;
int NUM_BLOCKS=104;
if (uiHC<104*32)
NUM_BLOCKS=uiHC/32;
int perBlockX=uiWC / threads.x/1;
int perBlockY=uiHC / threads.y/NUM_BLOCKS;
dim3 grid( 1, NUM_BLOCKS);
dim3 gridCUDA(grid.x*perBlockX,grid.y*perBlockY);
printf(" grid size: %dx%d per blockX= %d per blockY= %d\n",grid.x,grid.y,perBlockX,perBlockY);
printf(" uiWA %d uiWB %d \n",uiWA,uiWB);
// volatile GPUGlobals* gpuGlobals;
// initializer(&gpuGlobals);
// init_device_app();
// init_app();
// printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
// set seed for rand()
srand(2006);
char* num_iter= getenv("NUM_ITER");
int NUM_ITERATIONS= (num_iter==NULL)?1: atoi(num_iter);
#if 0
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A =NULL; //(float*)malloc(mem_size_A);
hipHostMalloc(&h_A,mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = NULL;//(float*)malloc(mem_size_B);
hipHostMalloc(&h_B,mem_size_B);
// initialize host memory
randomInit(h_A, size_A); write_mtx("mtx_a",h_A,mem_size_A);
randomInit(h_B, size_B);
write_mtx("mtx_b_orig",h_B,mem_size_B);
fprintf(stderr,"1\n");
float* h_B_t = (float*)malloc(mem_size_B);
transpose(h_B,h_B_t,uiHB,uiWB);
write_mtx("mtx_b",h_B_t,mem_size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
printf("memsize_C=%d\n",mem_size_C);
// allocate host memory for the result
float* h_C = NULL;//(float*) malloc(mem_size_C);
hipHostMalloc(&h_C,mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
unlink("mtx_c");
//unlink("mtx_c_orig");
// kernel warmup
// create and start timer
checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A));
checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B));
checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C));
#endif
#if 0
double res_cuda_data=0;
double res_cuda_kernel=0;
double total_time_cuda=0;
for(int zzz=0;zzz<NUM_ITERATIONS;zzz++){
double time_before_cuda=_timestamp();
int fd=open("mtx_a",O_RDONLY);
if (fd<0) {
perror("cant open mtx_a\n");
exit(-1);
}
if(read(fd, h_A,mem_size_A)!=mem_size_A) {
perror("cant read\n");
exit(-1);
}
close(fd);
fd=open("mtx_b_orig",O_RDONLY);
if (fd<0) {
perror("cant open mtx_b_orig\n");
exit(-1);
}
if(read(fd, h_B,mem_size_B)!=mem_size_B) {
perror("cant read\n");
exit(-1);
}
close(fd);
// copy host memory to device
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) );
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) );
total_time_cuda+=(_timestamp()-time_before_cuda);
fprintf(stderr,"CUDAMemory copy and file read: %.0f\n",(_timestamp()-time_before_cuda)/1000);
res_cuda_data+=(_timestamp()-time_before_cuda);
hipEvent_t e_b; hipEventCreate(&e_b);
hipEvent_t e_e; hipEventCreate(&e_e);
hipEvent_t e_m; hipEventCreate(&e_m);
// execute the warmup kernel
//matrixMulCUDA<32><<<gridCUDA,threads,0,0>>>(d_C,d_A,d_B,uiWA,uiWB);
//hipDeviceSynchronize();
time_before_cuda=_timestamp();
// for(int i=0;i<5;i++){
hipEventRecord(e_b);
printf("MATRIX\n");
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(gridCUDA),dim3(threads),0,0, d_C,d_A,d_B,uiWA,uiWB);
printf("MATRIX DONE\n");
hipDeviceSynchronize();
double time_kernel_only=_timestamp()-time_before_cuda;
hipEventRecord(e_e);
// }
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) );
hipEventRecord(e_m);
write_mtx("mtx_c_orig",h_C,mem_size_C);
double time_kernel_copyback= _timestamp()-time_before_cuda;
float only_kernel=0;
hipEventElapsedTime(&only_kernel,e_b,e_e);
float only_memcpy=0;
hipEventElapsedTime(&only_memcpy,e_e,e_m);
total_time_cuda+=time_kernel_copyback;
fprintf(stderr,"CUDAtime=%0.f kernel=%.0f memcpy=%.0f filecopy=%.0f gflop %0.3f\n",total_time_cuda/1000, only_kernel,only_memcpy, time_kernel_copyback/1000-only_memcpy-only_kernel, ((double)uiHA*uiWA*uiWB*2)/(1<<30)/(total_time_cuda/1e6) );
res_cuda_data+=(time_kernel_copyback-1000*only_kernel);
res_cuda_kernel=only_kernel*1000;
}
double res_cuda=total_time_cuda;
hipStream_t s[4];
hipStreamCreate(&s[0]);
hipStreamCreate(&s[1]);
hipStreamCreate(&s[2]);
hipStreamCreate(&s[3]);
hipEvent_t e_b; hipEventCreate(&e_b);
hipEvent_t e_e; hipEventCreate(&e_e);
hipEvent_t e_m; hipEventCreate(&e_m);
gridCUDA.y=gridCUDA.y/2;
total_time_cuda=0;
for(int zzz=0;zzz<NUM_ITERATIONS;zzz++){
double time_before_cuda=_timestamp();
int fd=open("mtx_a",O_RDONLY);
if (fd<0) { perror("cant open mtx_a\n"); exit(-1);}
int fd1=open("mtx_b_orig",O_RDONLY);
if (fd1<0) { perror("cant open mtx_b_orig\n"); exit(-1);}
#define OVERLAPS 2
//for(int x=0;x<OVERLAPS;++)
if(read(fd1, h_B ,mem_size_B)!=mem_size_B) {
perror("cant read\n");
exit(-1);
}
checkCudaErrors(hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice,s[0]) );
int tileA=mem_size_A/OVERLAPS;
int tileC=mem_size_C/OVERLAPS;
int f=0;
for(int y=0;y<OVERLAPS;y++){
int offset=mem_size_A/OVERLAPS*y;
if(pread(fd, ((char*)h_A)+offset, tileA,offset)!=tileA) {
perror("cant read\n");
exit(-1);
}
checkCudaErrors(hipMemcpyAsync(((char*)d_A)+offset, ((char*)h_A)+offset, tileA, hipMemcpyHostToDevice,s[y]) );
hipEventRecord(e_b,s[y]);
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(gridCUDA),dim3(threads),0,s[y], d_C+tileC*y/4,d_A+tileA*y/4,d_B,uiWA,uiWB);
hipEventRecord(e_e,s[y]);
checkCudaErrors(hipMemcpyAsync(h_C+tileC*y/4, d_C+tileC*y/4, tileC, hipMemcpyDeviceToHost,s[y]) );
hipEventRecord(e_m,s[y]);
if(y!=0){
checkCudaErrors(hipStreamSynchronize(s[y-1]));
f=write_mtx("mtx_c_orig_tiled",h_C+tileC/4*(y-1),tileC,tileC*(y-1),0,f);
}
}
checkCudaErrors(hipStreamSynchronize(s[OVERLAPS-1]));
write_mtx("mtx_c_orig_tiled",h_C+(OVERLAPS-1)*tileC/4,tileC,(OVERLAPS-1)*tileC,1,f);
double time_kernel_copyback= _timestamp()-time_before_cuda;
float only_kernel=0;
hipEventElapsedTime(&only_kernel,e_b,e_e);
float only_memcpy=0;
hipEventElapsedTime(&only_memcpy,e_e,e_m);
total_time_cuda+=time_kernel_copyback;
fprintf(stderr,"CUDAtime=%0.f kernel=%.0f memcpy=%.0f gflop %0.3f\n",total_time_cuda/1000, only_kernel,only_memcpy, ((double)uiHA*uiWA*uiWB*2)/(1<<30)/(total_time_cuda/1e6) );
close(fd);
close(fd1);
}
double res_tuned=total_time_cuda;
double c_open, c_rw, c_close;
c_open=c_rw=c_close=0;
#endif
double total_time=0;
for(int zzz=0;zzz<NUM_ITERATIONS;zzz++){
std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(0);
std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, grid);
char fn[]="mtx_c";
fn[0]='0'+zzz;
// unlink(fn);
double time_before=_timestamp();
{
hostLoop->launch(*hostContext, grid, threads, [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<>* loop, int wA, int wB, int perBlockX, int perBlockY, char n) {
matrixMul<32>(loop, wA, wB, perBlockX, perBlockY, n);
}, uiWA, uiWB,perBlockX,perBlockY,'0'+zzz);
}
// matrixMul<32><<< grid, threads,0,gpuGlobals->streamMgr->kernelStream >>>(uiWA, uiWB,perBlockX,perBlockY,'0'+zzz);
// run_gpufs_handler(gpuGlobals,0);
// hipError_t error= hipDeviceSynchronize();
double time_after=_timestamp();
total_time+=(time_after-time_before);
fprintf(stderr,"GPUFS >>>Total time=%0.f \n", (time_after-time_before)/1000);
//Check for errors and failed asserts in asynchronous kernel launch.
// if(error != hipSuccess ) {
// printf("Device failed, CUDA error message is: %s\n\n", hipGetErrorString(error));
// }
// stop and destroy timer
fprintf(stderr,"GPUFS >>>Total time=%0.f Gflops= %.3f\n", total_time/1000,((double)uiHA*uiWA*uiWB*2)/(1<<30)/(total_time/1e6));
// delete gpuGlobals;
PRINT_MALLOC;
PRINT_FREE;
PRINT_PAGE_ALLOC_RETRIES;
PRINT_LOCKLESS_SUCCESS;
PRINT_WRONG_FILE_ID;
PRINT_RT_MALLOC;
PRINT_RT_FREE;
PRINT_HT_MISS;
PRINT_PRECLOSE_PUSH;
PRINT_PRECLOSE_FETCH;
PRINT_HT_HIT;
PRINT_FLUSHED_READ;
PRINT_FLUSHED_WRITE;
PRINT_TRY_LOCK_FAILED;
}
// fprintf(stderr, "GPUFS open: %.0f, rw %.0f, close %.0f usec\n",c_open,c_rw,c_close);
// fprintf(stderr,"kernel is complete\n");
// fprintf(stderr,"Max pending requests: %d\n",max_req);
// fprintf(stderr,"Transfer time - not including sync: %.3f\n",transfer_time);
#if 0
char fn[]="mtx_c";
fn[0]='0';
int fd=open(fn,O_RDONLY);
if (fd<0) {
perror("cant open mtx_c\n");
exit(-1);
}
if(read(fd, h_C,mem_size_C)!=mem_size_C) {
perror("cant read\n");
exit(-1);
}
close(fd);
fd=open("mtx_c_orig_tiled",O_RDONLY);
if (fd<0) {
perror("cant open mtx_c_orig\n");
exit(-1);
}
if(read(fd, h_CUBLAS,mem_size_C)!=mem_size_C) {
perror("cant read orig\n");
exit(-1);
}
close(fd);
printf("Comparing CUBLAS & Host results\n");
bool resCUBLAS = sdkCompareL2fe(h_C, h_CUBLAS, size_C, 1.0e-6f);
if (resCUBLAS != true) {
printDiff(h_C, h_CUBLAS, uiWC, uiHC, 10000, 1.0e-5f);
}
fprintf(stderr,"CUBLAS compares %s\n\n", (true == resCUBLAS) ? "OK" : "FAIL");
#endif
#define FLOP(t) ((double)uiHA*uiWA*uiWB*2)/(1<<30)/(t/1e6)
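// FLOP(t): the product takes 2 * uiHA * uiWA * uiWB floating-point ops; dividing by 2^30
// gives GFLOP, and dividing by t (microseconds) converted to seconds gives GFLOP/s.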
// fprintf(stderr,"RESULTS: %d %d %d %d %d %d %.0f %.0f %.0f %.3f %.3f %.3f %.0f %.0f %.3f \n",uiHA,uiWA,uiWB,uiHA*uiWA,uiWA*uiWB,uiHA*uiWB, res_cuda,res_tuned,total_time,FLOP(res_cuda),FLOP(res_tuned),FLOP(total_time), res_cuda_data, res_cuda_kernel, res_cuda_data/res_cuda_kernel);
// clean up memory
#if 0
hipHostFree(h_A);
hipHostFree(h_B);
hipHostFree(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
#endif
// hipDeviceReset();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
printf("size: %d\n",size);
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
//data[i]=1;
}
void transpose(float*data, float* newData, int hight, int width){
for(int i=0;i<hight;i++){
for( int j=0;j<width;j++){
newData[j*hight+i]=data[i*width+j];
}
}
}
/*
void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol){
for ( int i=0;i<height*width;i+=1024){
for (int z=0;z<1024;z++){
if (((int) data1[i+z])!=i/1024 ) { printf("problem %.8f @ %d %d\n", data1[i+z],i+z,i/1024);}
// printf("%.0f ", data1[i+z]);
}
}
}
*/
void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol)
{
shrLog("Listing first %d Differences > %.6f...\n", iListLength, fListTol);
int i,j,k;
int error_count=0;
for (j = 0; j < height; j++)
{
if (error_count < iListLength) {
shrLog("\n Row %d:\n", j);
}
for (i = 0; i < width; i++) {
k = j * width + i;
float fDiff = ::fabs(data1[k] - data2[k]);
if (fDiff > fListTol) {
if (error_count < iListLength) {
shrLog(" Loc(%d,%d)\tCPU=%.5f\tGPU=%.5f\tDiff=%.6f\n", i, j, data1[k], data2[k], fDiff);
}
error_count++;
}
}
}
shrLog(" \n Total Errors = %d\n\n", error_count);
}
| e9774c195f3fdac10baf999af1d4b303c16ed27b.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This expermental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
/***
 * Matrix product from files. This GPUfs example uses the original matmul from the CUDA SDK,
 * but reads/writes the data from/to files instead of memory.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* CUBLAS provides high-performance matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*
*/
// Utilities and system includes
#include <cmath>
#include <errno.h>
#include <cublas_v2.h>
#if 0
#include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h>
#include <shrUtils.h>
#else
// CUDA 7.5 helper functions.
// #include <helper_functions.h>
// #include <helper_cuda.h>
// #include "shrQATest.h"
#endif
#include <cuda_runtime.h>
#include <unistd.h>
#include <gloop/gloop.h>
#define shrLog printf
static const char *sSDKsample = "matrixMul";
void init_device_app(){
// CUDA_SAFE_CALL(cudaSetDevice(global_devicenum));
CUDA_SAFE_CALL(cudaDeviceSetLimit(cudaLimitMallocHeapSize,1<<25));
}
#include "matrixMul.h"
// includes, kernels
#include "matrixMul_kernel.cu"
#if 0
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString( err ) );
// exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
// exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1);
}
checkCudaErrors( cudaSetDevice(devID) );
printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while ( current_device < device_count ) {
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count ) {
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf ) {
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 ) {
// If our device==dest_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
cudaDeviceProp deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( cudaSetDevice( devID ) );
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name);
}
return devID;
}
// end of CUDA Helper Functions
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
void randomInit(float*, int);
void transpose(float*, float*, int,int);
void printDiff(float*, float*, int, int, int, float);
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
void inline checkError(cublasStatus_t status, const char* msg)
{
if(status != CUBLAS_STATUS_SUCCESS){
printf(msg);
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
shrQAStart(argc, argv);
printf("[ %s ]\n", sSDKsample);
//shrSetLogFileName ("matrixMul.txt");
shrLog("%s\n\tStarting (CUDA and CUBLAS tests)...\n\n", argv[0]);
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
int write_mtx(const char* name, void* data, size_t size,size_t offset=0, int doclose=1, int fd=0){
if(offset==0)
unlink(name);
if(offset==0)
fd=open(name,O_WRONLY|O_CREAT,S_IRWXU);
if (fd<0) {
perror("cant open mtx\n");
exit(-1);
}
if(pwrite(fd, data,size,offset)!=size) {
perror("cant write\n");
exit(-1);
}
//fsync(fd);
if (doclose) {
close(fd);
fd=0;
}
return fd;
}
#define START_CLOCK(var) {(var)=_timestamp();}
#define STOP_CLOCK(var) {(var)=_timestamp()-(var);}
#define CLOCK_IT(var,accumulator, proc) START_CLOCK(var); {proc;} STOP_CLOCK(var); accumulator+=var;
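/* Illustrative use of the timing macros above (a sketch only: `elapsed`,
 * `total_io_time` and the pread() arguments are hypothetical names, not
 * variables defined at this point in the file):
 *
 *   double elapsed, total_io_time = 0;
 *   CLOCK_IT(elapsed, total_io_time, pread(fd, h_A, tileA, offset));
 *
 * `elapsed` then holds the wall-clock duration of the wrapped call as reported
 * by _timestamp() (which, judging from the prints below, is in microseconds),
 * and `total_io_time` accumulates it across calls. */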
bool runCUDA=true;
void runTest(int argc, char** argv)
{
// use a larger block size for Fermi and above
int block_size = 32;
// Optional Command-line multiplier for matrix sizes
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA;
uiHA = HA;
uiWB = WB;
char* c_HA=getenv("HA");
char* c_WA=getenv("WA");
char* c_WB=getenv("WB");
if (c_HA)
uiHA=HA*atoi(c_HA);
if (c_WA)
uiWA=WA*atoi(c_WA);
if (c_WB)
uiWB=WB*atoi(c_WB);
uiHB = uiWA;
uiWC = uiWB;
uiHC = uiHA;
shrLog("\nUsing Matrix Sizes: A(%u x %u), B(%u x %u), C(%u x %u)\n\n",
uiHA, uiWA, uiHB, uiWB, uiHC, uiWC);
#if 0
if(checkCmdLineFlag(argc, (const char**)argv, "device")) {
int devID = getCmdLineArgumentInt(argc, (const char **)argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
checkCudaErrors( cudaSetDevice(gpuGetMaxGflopsDeviceId()) );
}
int devID;
cudaDeviceProp props;
// get number of SMs on this GPU
checkCudaErrors(cudaGetDevice(&devID));
checkCudaErrors(cudaGetDeviceProperties(&props, devID));
cudaSetDevice(0);
printf("OK?\n");
#endif
// setup execution parameters
dim3 threads(block_size, block_size);
// int perBlockX=1;
// int perBlockY=1;
int NUM_BLOCKS=104;
if (uiHC<104*32)
NUM_BLOCKS=uiHC/32;
int perBlockX=uiWC / threads.x/1;
int perBlockY=uiHC / threads.y/NUM_BLOCKS;
dim3 grid( 1, NUM_BLOCKS);
dim3 gridCUDA(grid.x*perBlockX,grid.y*perBlockY);
printf(" grid size: %dx%d per blockX= %d per blockY= %d\n",grid.x,grid.y,perBlockX,perBlockY);
printf(" uiWA %d uiWB %d \n",uiWA,uiWB);
// volatile GPUGlobals* gpuGlobals;
// initializer(&gpuGlobals);
// init_device_app();
// init_app();
// printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
// set seed for rand()
srand(2006);
char* num_iter= getenv("NUM_ITER");
int NUM_ITERATIONS= (num_iter==NULL)?1: atoi(num_iter);
#if 0
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A =NULL; //(float*)malloc(mem_size_A);
cudaMallocHost(&h_A,mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = NULL;//(float*)malloc(mem_size_B);
cudaMallocHost(&h_B,mem_size_B);
// initialize host memory
randomInit(h_A, size_A); write_mtx("mtx_a",h_A,mem_size_A);
randomInit(h_B, size_B);
write_mtx("mtx_b_orig",h_B,mem_size_B);
fprintf(stderr,"1\n");
float* h_B_t = (float*)malloc(mem_size_B);
transpose(h_B,h_B_t,uiHB,uiWB);
write_mtx("mtx_b",h_B_t,mem_size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
printf("memsize_C=%d\n",mem_size_C);
// allocate host memory for the result
float* h_C = NULL;//(float*) malloc(mem_size_C);
cudaMallocHost(&h_C,mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
unlink("mtx_c");
//unlink("mtx_c_orig");
// kernel warmup
// create and start timer
checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A));
checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B));
checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C));
#endif
#if 0
double res_cuda_data=0;
double res_cuda_kernel=0;
double total_time_cuda=0;
for(int zzz=0;zzz<NUM_ITERATIONS;zzz++){
double time_before_cuda=_timestamp();
int fd=open("mtx_a",O_RDONLY);
if (fd<0) {
perror("cant open mtx_a\n");
exit(-1);
}
if(read(fd, h_A,mem_size_A)!=mem_size_A) {
perror("cant read\n");
exit(-1);
}
close(fd);
fd=open("mtx_b_orig",O_RDONLY);
if (fd<0) {
perror("cant open mtx_b_orig\n");
exit(-1);
}
if(read(fd, h_B,mem_size_B)!=mem_size_B) {
perror("cant read\n");
exit(-1);
}
close(fd);
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) );
total_time_cuda+=(_timestamp()-time_before_cuda);
fprintf(stderr,"CUDAMemory copy and file read: %.0f\n",(_timestamp()-time_before_cuda)/1000);
res_cuda_data+=(_timestamp()-time_before_cuda);
cudaEvent_t e_b; cudaEventCreate(&e_b);
cudaEvent_t e_e; cudaEventCreate(&e_e);
cudaEvent_t e_m; cudaEventCreate(&e_m);
// execute the warmup kernel
//matrixMulCUDA<32><<<gridCUDA,threads,0,0>>>(d_C,d_A,d_B,uiWA,uiWB);
//cudaDeviceSynchronize();
time_before_cuda=_timestamp();
// for(int i=0;i<5;i++){
cudaEventRecord(e_b);
printf("MATRIX\n");
matrixMulCUDA<32><<<gridCUDA,threads,0,0>>>(d_C,d_A,d_B,uiWA,uiWB);
printf("MATRIX DONE\n");
cudaDeviceSynchronize();
double time_kernel_only=_timestamp()-time_before_cuda;
cudaEventRecord(e_e);
// }
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
cudaEventRecord(e_m);
write_mtx("mtx_c_orig",h_C,mem_size_C);
double time_kernel_copyback= _timestamp()-time_before_cuda;
float only_kernel=0;
cudaEventElapsedTime(&only_kernel,e_b,e_e);
float only_memcpy=0;
cudaEventElapsedTime(&only_memcpy,e_e,e_m);
total_time_cuda+=time_kernel_copyback;
fprintf(stderr,"CUDAtime=%0.f kernel=%.0f memcpy=%.0f filecopy=%.0f gflop %0.3f\n",total_time_cuda/1000, only_kernel,only_memcpy, time_kernel_copyback/1000-only_memcpy-only_kernel, ((double)uiHA*uiWA*uiWB*2)/(1<<30)/(total_time_cuda/1e6) );
res_cuda_data+=(time_kernel_copyback-1000*only_kernel);
res_cuda_kernel=only_kernel*1000;
}
double res_cuda=total_time_cuda;
cudaStream_t s[4];
cudaStreamCreate(&s[0]);
cudaStreamCreate(&s[1]);
cudaStreamCreate(&s[2]);
cudaStreamCreate(&s[3]);
cudaEvent_t e_b; cudaEventCreate(&e_b);
cudaEvent_t e_e; cudaEventCreate(&e_e);
cudaEvent_t e_m; cudaEventCreate(&e_m);
gridCUDA.y=gridCUDA.y/2;
total_time_cuda=0;
for(int zzz=0;zzz<NUM_ITERATIONS;zzz++){
double time_before_cuda=_timestamp();
int fd=open("mtx_a",O_RDONLY);
if (fd<0) { perror("cant open mtx_a\n"); exit(-1);}
int fd1=open("mtx_b_orig",O_RDONLY);
if (fd1<0) { perror("cant open mtx_b_orig\n"); exit(-1);}
#define OVERLAPS 2
//for(int x=0;x<OVERLAPS;++)
if(read(fd1, h_B ,mem_size_B)!=mem_size_B) {
perror("cant read\n");
exit(-1);
}
checkCudaErrors(cudaMemcpyAsync(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice,s[0]) );
int tileA=mem_size_A/OVERLAPS;
int tileC=mem_size_C/OVERLAPS;
int f=0;
for(int y=0;y<OVERLAPS;y++){
int offset=mem_size_A/OVERLAPS*y;
if(pread(fd, ((char*)h_A)+offset, tileA,offset)!=tileA) {
perror("cant read\n");
exit(-1);
}
checkCudaErrors(cudaMemcpyAsync(((char*)d_A)+offset, ((char*)h_A)+offset, tileA, cudaMemcpyHostToDevice,s[y]) );
cudaEventRecord(e_b,s[y]);
matrixMulCUDA<32><<<gridCUDA,threads,0,s[y]>>>(d_C+tileC*y/4,d_A+tileA*y/4,d_B,uiWA,uiWB);
cudaEventRecord(e_e,s[y]);
checkCudaErrors(cudaMemcpyAsync(h_C+tileC*y/4, d_C+tileC*y/4, tileC, cudaMemcpyDeviceToHost,s[y]) );
cudaEventRecord(e_m,s[y]);
if(y!=0){
checkCudaErrors(cudaStreamSynchronize(s[y-1]));
f=write_mtx("mtx_c_orig_tiled",h_C+tileC/4*(y-1),tileC,tileC*(y-1),0,f);
}
}
checkCudaErrors(cudaStreamSynchronize(s[OVERLAPS-1]));
write_mtx("mtx_c_orig_tiled",h_C+(OVERLAPS-1)*tileC/4,tileC,(OVERLAPS-1)*tileC,1,f);
double time_kernel_copyback= _timestamp()-time_before_cuda;
float only_kernel=0;
cudaEventElapsedTime(&only_kernel,e_b,e_e);
float only_memcpy=0;
cudaEventElapsedTime(&only_memcpy,e_e,e_m);
total_time_cuda+=time_kernel_copyback;
fprintf(stderr,"CUDAtime=%0.f kernel=%.0f memcpy=%.0f gflop %0.3f\n",total_time_cuda/1000, only_kernel,only_memcpy, ((double)uiHA*uiWA*uiWB*2)/(1<<30)/(total_time_cuda/1e6) );
close(fd);
close(fd1);
}
double res_tuned=total_time_cuda;
double c_open, c_rw, c_close;
c_open=c_rw=c_close=0;
#endif
double total_time=0;
for(int zzz=0;zzz<NUM_ITERATIONS;zzz++){
std::unique_ptr<gloop::HostLoop> hostLoop = gloop::HostLoop::create(0);
std::unique_ptr<gloop::HostContext> hostContext = gloop::HostContext::create(*hostLoop, grid);
char fn[]="mtx_c";
fn[0]='0'+zzz;
// unlink(fn);
double time_before=_timestamp();
{
hostLoop->launch(*hostContext, grid, threads, [=] GLOOP_DEVICE_LAMBDA (gloop::DeviceLoop<>* loop, int wA, int wB, int perBlockX, int perBlockY, char n) {
matrixMul<32>(loop, wA, wB, perBlockX, perBlockY, n);
}, uiWA, uiWB,perBlockX,perBlockY,'0'+zzz);
}
// matrixMul<32><<< grid, threads,0,gpuGlobals->streamMgr->kernelStream >>>(uiWA, uiWB,perBlockX,perBlockY,'0'+zzz);
// run_gpufs_handler(gpuGlobals,0);
// cudaError_t error= cudaDeviceSynchronize();
double time_after=_timestamp();
total_time+=(time_after-time_before);
fprintf(stderr,"GPUFS >>>Total time=%0.f \n", (time_after-time_before)/1000);
//Check for errors and failed asserts in asynchronous kernel launch.
// if(error != cudaSuccess ) {
// printf("Device failed, CUDA error message is: %s\n\n", cudaGetErrorString(error));
// }
// stop and destroy timer
fprintf(stderr,"GPUFS >>>Total time=%0.f Gflops= %.3f\n", total_time/1000,((double)uiHA*uiWA*uiWB*2)/(1<<30)/(total_time/1e6));
// delete gpuGlobals;
PRINT_MALLOC;
PRINT_FREE;
PRINT_PAGE_ALLOC_RETRIES;
PRINT_LOCKLESS_SUCCESS;
PRINT_WRONG_FILE_ID;
PRINT_RT_MALLOC;
PRINT_RT_FREE;
PRINT_HT_MISS;
PRINT_PRECLOSE_PUSH;
PRINT_PRECLOSE_FETCH;
PRINT_HT_HIT;
PRINT_FLUSHED_READ;
PRINT_FLUSHED_WRITE;
PRINT_TRY_LOCK_FAILED;
}
// fprintf(stderr, "GPUFS open: %.0f, rw %.0f, close %.0f usec\n",c_open,c_rw,c_close);
// fprintf(stderr,"kernel is complete\n");
// fprintf(stderr,"Max pending requests: %d\n",max_req);
// fprintf(stderr,"Transfer time - not including sync: %.3f\n",transfer_time);
#if 0
char fn[]="mtx_c";
fn[0]='0';
int fd=open(fn,O_RDONLY);
if (fd<0) {
perror("cant open mtx_c\n");
exit(-1);
}
if(read(fd, h_C,mem_size_C)!=mem_size_C) {
perror("cant read\n");
exit(-1);
}
close(fd);
fd=open("mtx_c_orig_tiled",O_RDONLY);
if (fd<0) {
perror("cant open mtx_c_orig\n");
exit(-1);
}
if(read(fd, h_CUBLAS,mem_size_C)!=mem_size_C) {
perror("cant read orig\n");
exit(-1);
}
close(fd);
printf("Comparing CUBLAS & Host results\n");
bool resCUBLAS = sdkCompareL2fe(h_C, h_CUBLAS, size_C, 1.0e-6f);
if (resCUBLAS != true) {
printDiff(h_C, h_CUBLAS, uiWC, uiHC, 10000, 1.0e-5f);
}
fprintf(stderr,"CUBLAS compares %s\n\n", (true == resCUBLAS) ? "OK" : "FAIL");
#endif
#define FLOP(t) ((double)uiHA*uiWA*uiWB*2)/(1<<30)/(t/1e6)
// fprintf(stderr,"RESULTS: %d %d %d %d %d %d %.0f %.0f %.0f %.3f %.3f %.3f %.0f %.0f %.3f \n",uiHA,uiWA,uiWB,uiHA*uiWA,uiWA*uiWB,uiHA*uiWB, res_cuda,res_tuned,total_time,FLOP(res_cuda),FLOP(res_tuned),FLOP(total_time), res_cuda_data, res_cuda_kernel, res_cuda_data/res_cuda_kernel);
// clean up memory
#if 0
cudaFreeHost(h_A);
cudaFreeHost(h_B);
cudaFreeHost(h_C);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
#endif
// cudaDeviceReset();
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
printf("size: %d\n",size);
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
//data[i]=1;
}
void transpose(float*data, float* newData, int height, int width){
for(int i=0;i<height;i++){
for( int j=0;j<width;j++){
newData[j*height+i]=data[i*width+j];
}
}
}
/*
void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol){
for ( int i=0;i<height*width;i+=1024){
for (int z=0;z<1024;z++){
if (((int) data1[i+z])!=i/1024 ) { printf("problem %.8f @ %d %d\n", data1[i+z],i+z,i/1024);}
// printf("%.0f ", data1[i+z]);
}
}
}
*/
void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol)
{
shrLog("Listing first %d Differences > %.6f...\n", iListLength, fListTol);
int i,j,k;
int error_count=0;
for (j = 0; j < height; j++)
{
if (error_count < iListLength) {
shrLog("\n Row %d:\n", j);
}
for (i = 0; i < width; i++) {
k = j * width + i;
float fDiff = std::fabs(data1[k] - data2[k]);
if (fDiff > fListTol) {
if (error_count < iListLength) {
shrLog(" Loc(%d,%d)\tCPU=%.5f\tGPU=%.5f\tDiff=%.6f\n", i, j, data1[k], data2[k], fDiff);
}
error_count++;
}
}
}
shrLog(" \n Total Errors = %d\n\n", error_count);
}
|
a0b5a0cf93ad5d9929c080b7bfcbe8ebea9600f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include "reductionMax.hh"
#include "volumeRender_kernel.cuh"
typedef unsigned int uint;
typedef unsigned char uchar;
typedef struct {
float4 m[3];
} float3x4;
typedef unsigned short VolumeType;
//typedef float VolumeType;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
texture<VolumeType, 3, hipReadModeElementType> tex; // 3D texture
texture<VolumeType, 3, hipReadModeElementType> tex_cluster; // 3D texture
struct Ray {
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
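/* Slab-method summary, restating the code above: for each axis k,
 *   t_bot_k = (boxmin_k - o_k) / d_k,   t_top_k = (boxmax_k - o_k) / d_k,
 *   tnear   = max_k( min(t_bot_k, t_top_k) ),
 *   tfar    = min_k( max(t_bot_k, t_top_k) ),
 * and the ray hits the box iff tfar > tnear. The caller additionally clamps
 * tnear to 0 so that marching starts at the eye when it lies inside the box. */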
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__
float4 color_interpolate_cluster(float sample){
// ACCENT
// if(sample <= 1)
// return make_float4((float)0.99215,(float)0.75294, (float)0.52549, 1.0);
// else if(sample <= 2)
// return make_float4( (float)0.498, (float)0.7882, (float)0.498, 0.25);
// else if(sample <= 3)
// return make_float4((float)0.74509,(float)0.68235, (float)0.83137, 1.0);
// else if(sample <= 4)
// return make_float4(1.0,1.0,1.0,1.0);
// Dark2
if(sample <= 1)
return make_float4( 0.8509803921569,0.3725490196078,0.007843137254902, 1.0);
else if(sample <= 2)
return make_float4( 0.1058823529412, 0.6196078431373, 0.4666666666667, 0.25);
else if(sample <= 3)
return make_float4( 0.4588235294118,0.4392156862745,0.7019607843137, 1.0);
else if(sample <= 4)
return make_float4(1.0,1.0,1.0,1.0);
return make_float4(0.0,0.0,0.0,0.0);
}
__device__
float4 color_interpolate_large(float sample, float4 one, float4 two, float4 three,
float4 four, float4 five, float4 six){
float4 retcolor = make_float4(0);
float percent = 0.0f;
if(sample <= 0.2f){
percent = (0.2f - sample) / 0.2f;
retcolor = (percent)*one + (1.0f-percent) * two;
}else if(sample > 0.2f && sample <= 0.3f){
percent = (0.3f - sample) / 0.1f;
retcolor = (percent)*two + (1.0f-percent) * three;
}else if(sample > 0.3f && sample <= 0.4f){
percent = (0.4f - sample) / 0.1f;
retcolor = (percent)*three + (1.0f-percent) * four;
}else if(sample > 0.4f && sample <= 0.5f){
percent = (0.5f - sample) / 0.1f;
retcolor = (percent)*four + (1.0f-percent) * five;
}else{
percent = (1.0 - sample) / 0.5f;
retcolor = (percent)*five + (1.0f-percent) * six;
}
return retcolor;
}
__device__
float4 color_interpolate(float sample, float4 one, float4 two, float4 three,
float4 four, float4 five, float4 six){
float4 retcolor = make_float4(0);
float percent = 0.0f;
if(sample <= 25500.0f){
percent = (25500.0f - sample) / 25500.0f;
retcolor = (percent)*one + (1.0f-percent) * two;
}else if(sample > 25500.0f && sample <= 26500.0f){
percent = (26500.0f - sample) / 1000.0f;
retcolor = (percent)*two + (1.0f-percent) * three;
}else if(sample > 26500.0f && sample <= 27500.0f){
percent = (27500.0f - sample) / 1000.0f;
retcolor = (percent)*three + (1.0f-percent) * four;
}else if(sample > 27500.0f && sample <= 28500.0f){
percent = (28500.0f - sample) / 1000.0f;
retcolor = (percent)*four + (1.0f-percent) * five;
}else{
percent = (65535.0f - sample) / 65535.0f;
retcolor = (percent)*five + (1.0f-percent) * six;
}
return retcolor;
}
__device__ uint rgbaFloatToInt(float4 rgba, float global_max, float red, float green, float blue)
{
rgba.x = rgba.x / (global_max+2);
rgba.y = rgba.y / (global_max+2);
rgba.z = rgba.z / (global_max+2);
rgba.w = 0.5;
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
__global__ void
d_render(float4 *d_iColors, ushort *data,
float *d_iRed, float *d_iGreen, float *d_iBlue, uint imageW, uint imageH,
float density, float brightness, float4 one, float4 two, float4 three,
float4 four, float4 five, float4 six, int type)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
float sample = 0;
for(int i=0; i<maxSteps; i++) {
// read from 3D texture
// remap position to [0, 1] coordinates
if(type == 0)
sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
else
sample = tex3D(tex_cluster, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
float4 col = make_float4(0.0f);
// lookup in transfer function texture
if(type == 0)
col = color_interpolate(sample,one,two,three,four,five,six);
else
col = color_interpolate_cluster(sample);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col;//*(1.0f - sum.w);
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
d_iColors[y*imageW + x] = sum;
d_iRed[y*imageW + x] = sum.x;
d_iGreen[y*imageW + x] = sum.y;
d_iBlue[y*imageW + x] = sum.z;
}
__global__
void create_image(uint *output, float4 *d_iColors, float global_max, float red, float green, float blue, uint imageW, uint imageH){
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
output[y*imageW+x] = rgbaFloatToInt(d_iColors[y*imageW+x], global_max, red, green, blue); // row stride is imageW, matching d_iColors
}
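/* Note on the two-pass tone mapping orchestrated by render_kernel() below:
 * d_render() writes unbounded accumulated colours into d_iColors plus the
 * per-channel buffers d_iRed/d_iGreen/d_iBlue, reduce_max() (presumably the
 * reduction declared in reductionMax.hh, included above) extracts the
 * per-channel maxima, and create_image() rescales by the global maximum
 * before packing each pixel into 8-bit RGBA. */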
void setup_cluster(void *cluster, hipExtent volumeSize, uint image_size, hipArray *d_volumeArray_cluster){
// Cluster setup
// create 3D array
hipChannelFormatDesc channelDesc_cluster = hipCreateChannelDesc<VolumeType>();
cutilSafeCall( hipMalloc3DArray(&d_volumeArray_cluster, &channelDesc_cluster, volumeSize) );
// copy data to 3D array
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(cluster, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray_cluster;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyHostToDevice;
cutilSafeCall( hipMemcpy3D(©Params) );
// set texture parameters
tex_cluster.normalized = true; // access with normalized texture coordinates
tex_cluster.filterMode = hipFilterModePoint; // nearest-neighbour (point) sampling
tex_cluster.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
tex_cluster.addressMode[1] = hipAddressModeClamp;
// bind array to 3D texture
cutilSafeCall(hipBindTextureToArray(tex_cluster, d_volumeArray_cluster, channelDesc_cluster));
}
void setup_volume(void *h_volume, hipExtent volumeSize, uint image_size, hipArray *d_volumeArray){
// create 3D array
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>();
cutilSafeCall( hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) );
// copy data to 3D array
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyHostToDevice;
cutilSafeCall( hipMemcpy3D(©Params) );
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = hipFilterModePoint; // nearest-neighbour (point) sampling
tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
tex.addressMode[1] = hipAddressModeClamp;
// bind array to 3D texture
cutilSafeCall(hipBindTextureToArray(tex, d_volumeArray, channelDesc));
}
void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint *d_cluster, float* d_iRed, float* d_oRed,
float* d_iGreen, float* d_oGreen, float* d_iBlue, float* d_oBlue, float4* d_iColors, unsigned short* data,
unsigned short *cluster_data, uint imageW, uint imageH, float density, float brightness,
float4 one, float4 two, float4 three, float4 four, float4 five, float4 six,
void *h_volume, void *cluster, hipExtent volumeSize, hipArray *d_volumeArray, hipArray *d_volumeArray_cluster, int *set)
{
int size = imageH * imageW;
if(set[0] == 0){
setup_volume(h_volume, volumeSize, size, d_volumeArray);
set[0] = 1;
}
if(set[1] == 0){
setup_cluster(cluster, volumeSize, size, d_volumeArray_cluster);
set[1] = 1;
}
/* clear colors buffers */
cutilSafeCall(hipMemset(d_iColors, 0, imageH*imageW*sizeof(float4)));
cutilSafeCall(hipMemset(d_iRed, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(hipMemset(d_oRed, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(hipMemset(d_iGreen, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(hipMemset(d_oGreen, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(hipMemset(d_iBlue, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(hipMemset(d_oBlue, 0, imageH*imageW*sizeof(float)));
hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_iColors, data, d_iRed, d_iGreen, d_iBlue, imageW, imageH, density, brightness,
one, two, three, four, five, six, 0);
float max_red = reduce_max(d_oRed, d_iRed, size);
float max_green = reduce_max(d_oGreen, d_iGreen, size);
float max_blue = reduce_max(d_oBlue, d_iBlue, size);
float global_max = fmax(max_red, max_green);
global_max = fmax(global_max, max_blue);
hipLaunchKernelGGL(( create_image), dim3(gridSize), dim3(blockSize), 0, 0, d_output, d_iColors, global_max, max_red, max_green, max_blue, imageW, imageH);
// render image
//
hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_iColors, cluster_data, d_iRed, d_iGreen, d_iBlue, imageW, imageH, density, brightness,
one, two, three, four, five, six, 1);
max_red = reduce_max(d_oRed, d_iRed, size);
max_green = reduce_max(d_oGreen, d_iGreen, size);
max_blue = reduce_max(d_oBlue, d_iBlue, size);
global_max = fmax(max_red, max_green);
global_max = fmax(global_max, max_blue);
hipLaunchKernelGGL(( create_image), dim3(gridSize), dim3(blockSize), 0, 0, d_cluster, d_iColors, global_max, max_red, max_green, max_blue, imageW, imageH);
}
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
cutilSafeCall( hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) );
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
| a0b5a0cf93ad5d9929c080b7bfcbe8ebea9600f3.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include "reductionMax.hh"
#include "volumeRender_kernel.cuh"
typedef unsigned int uint;
typedef unsigned char uchar;
typedef struct {
float4 m[3];
} float3x4;
typedef unsigned short VolumeType;
//typedef float VolumeType;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
texture<VolumeType, 3, cudaReadModeElementType> tex; // 3D texture
texture<VolumeType, 3, cudaReadModeElementType> tex_cluster; // 3D texture
struct Ray {
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__
float4 color_interpolate_cluster(float sample){
// ACCENT
// if(sample <= 1)
// return make_float4((float)0.99215,(float)0.75294, (float)0.52549, 1.0);
// else if(sample <= 2)
// return make_float4( (float)0.498, (float)0.7882, (float)0.498, 0.25);
// else if(sample <= 3)
// return make_float4((float)0.74509,(float)0.68235, (float)0.83137, 1.0);
// else if(sample <= 4)
// return make_float4(1.0,1.0,1.0,1.0);
// Dark2
if(sample <= 1)
return make_float4( 0.8509803921569,0.3725490196078,0.007843137254902, 1.0);
else if(sample <= 2)
return make_float4( 0.1058823529412, 0.6196078431373, 0.4666666666667, 0.25);
else if(sample <= 3)
return make_float4( 0.4588235294118,0.4392156862745,0.7019607843137, 1.0);
else if(sample <= 4)
return make_float4(1.0,1.0,1.0,1.0);
return make_float4(0.0,0.0,0.0,0.0);
}
__device__
float4 color_interpolate_large(float sample, float4 one, float4 two, float4 three,
float4 four, float4 five, float4 six){
float4 retcolor = make_float4(0);
float percent = 0.0f;
if(sample <= 0.2f){
percent = (0.2f - sample) / 0.2f;
retcolor = (percent)*one + (1.0f-percent) * two;
}else if(sample > 0.2f && sample <= 0.3f){
percent = (0.3f - sample) / 0.1f;
retcolor = (percent)*two + (1.0f-percent) * three;
}else if(sample > 0.3f && sample <= 0.4f){
percent = (0.4f - sample) / 0.1f;
retcolor = (percent)*three + (1.0f-percent) * four;
}else if(sample > 0.4f && sample <= 0.5f){
percent = (0.5f - sample) / 0.1f;
retcolor = (percent)*four + (1.0f-percent) * five;
}else{
percent = (1.0 - sample) / 0.5f;
retcolor = (percent)*five + (1.0f-percent) * six;
}
return retcolor;
}
__device__
float4 color_interpolate(float sample, float4 one, float4 two, float4 three,
float4 four, float4 five, float4 six){
float4 retcolor = make_float4(0);
float percent = 0.0f;
if(sample <= 25500.0f){
percent = (25500.0f - sample) / 25500.0f;
retcolor = (percent)*one + (1.0f-percent) * two;
}else if(sample > 25500.0f && sample <= 26500.0f){
percent = (26500.0f - sample) / 1000.0f;
retcolor = (percent)*two + (1.0f-percent) * three;
}else if(sample > 26500.0f && sample <= 27500.0f){
percent = (27500.0f - sample) / 1000.0f;
retcolor = (percent)*three + (1.0f-percent) * four;
}else if(sample > 27500.0f && sample <= 28500.0f){
percent = (28500.0f - sample) / 1000.0f;
retcolor = (percent)*four + (1.0f-percent) * five;
}else{
percent = (65535.0f - sample) / 65535.0f;
retcolor = (percent)*five + (1.0f-percent) * six;
}
return retcolor;
}
__device__ uint rgbaFloatToInt(float4 rgba, float global_max, float red, float green, float blue)
{
rgba.x = rgba.x / (global_max+2);
rgba.y = rgba.y / (global_max+2);
rgba.z = rgba.z / (global_max+2);
rgba.w = 0.5;
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
__global__ void
d_render(float4 *d_iColors, ushort *data,
float *d_iRed, float *d_iGreen, float *d_iBlue, uint imageW, uint imageH,
float density, float brightness, float4 one, float4 two, float4 three,
float4 four, float4 five, float4 six, int type)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
float sample = 0;
for(int i=0; i<maxSteps; i++) {
// read from 3D texture
// remap position to [0, 1] coordinates
if(type == 0)
sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
else
sample = tex3D(tex_cluster, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
float4 col = make_float4(0.0f);
// lookup in transfer function texture
if(type == 0)
col = color_interpolate(sample,one,two,three,four,five,six);
else
col = color_interpolate_cluster(sample);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col;//*(1.0f - sum.w);
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
d_iColors[y*imageW + x] = sum;
d_iRed[y*imageW + x] = sum.x;
d_iGreen[y*imageW + x] = sum.y;
d_iBlue[y*imageW + x] = sum.z;
}
__global__
void create_image(uint *output, float4 *d_iColors, float global_max, float red, float green, float blue, uint imageW, uint imageH){
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
output[y*imageW+x] = rgbaFloatToInt(d_iColors[y*imageW+x], global_max, red, green, blue); // row stride is imageW, matching d_iColors
}
void setup_cluster(void *cluster, cudaExtent volumeSize, uint image_size, cudaArray *d_volumeArray_cluster){
// Cluster setup
// create 3D array
cudaChannelFormatDesc channelDesc_cluster = cudaCreateChannelDesc<VolumeType>();
cutilSafeCall( cudaMalloc3DArray(&d_volumeArray_cluster, &channelDesc_cluster, volumeSize) );
// copy data to 3D array
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(cluster, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray_cluster;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyHostToDevice;
cutilSafeCall( cudaMemcpy3D(©Params) );
// set texture parameters
tex_cluster.normalized = true; // access with normalized texture coordinates
tex_cluster.filterMode = cudaFilterModePoint; // nearest-neighbour (point) sampling
tex_cluster.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
tex_cluster.addressMode[1] = cudaAddressModeClamp;
// bind array to 3D texture
cutilSafeCall(cudaBindTextureToArray(tex_cluster, d_volumeArray_cluster, channelDesc_cluster));
}
void setup_volume(void *h_volume, cudaExtent volumeSize, uint image_size, cudaArray *d_volumeArray){
// create 3D array
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>();
cutilSafeCall( cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) );
// copy data to 3D array
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyHostToDevice;
cutilSafeCall( cudaMemcpy3D(©Params) );
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = cudaFilterModePoint; // nearest-neighbour (point) sampling
tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
tex.addressMode[1] = cudaAddressModeClamp;
// bind array to 3D texture
cutilSafeCall(cudaBindTextureToArray(tex, d_volumeArray, channelDesc));
}
void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint *d_cluster, float* d_iRed, float* d_oRed,
float* d_iGreen, float* d_oGreen, float* d_iBlue, float* d_oBlue, float4* d_iColors, unsigned short* data,
unsigned short *cluster_data, uint imageW, uint imageH, float density, float brightness,
float4 one, float4 two, float4 three, float4 four, float4 five, float4 six,
void *h_volume, void *cluster, cudaExtent volumeSize, cudaArray *d_volumeArray, cudaArray *d_volumeArray_cluster, int *set)
{
int size = imageH * imageW;
if(set[0] == 0){
setup_volume(h_volume, volumeSize, size, d_volumeArray);
set[0] = 1;
}
if(set[1] == 0){
setup_cluster(cluster, volumeSize, size, d_volumeArray_cluster);
set[1] = 1;
}
/* clear colors buffers */
cutilSafeCall(cudaMemset(d_iColors, 0, imageH*imageW*sizeof(float4)));
cutilSafeCall(cudaMemset(d_iRed, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(cudaMemset(d_oRed, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(cudaMemset(d_iGreen, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(cudaMemset(d_oGreen, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(cudaMemset(d_iBlue, 0, imageH*imageW*sizeof(float)));
cutilSafeCall(cudaMemset(d_oBlue, 0, imageH*imageW*sizeof(float)));
d_render<<<gridSize, blockSize>>>(d_iColors, data, d_iRed, d_iGreen, d_iBlue, imageW, imageH, density, brightness,
one, two, three, four, five, six, 0);
float max_red = reduce_max(d_oRed, d_iRed, size);
float max_green = reduce_max(d_oGreen, d_iGreen, size);
float max_blue = reduce_max(d_oBlue, d_iBlue, size);
float global_max = fmax(max_red, max_green);
global_max = fmax(global_max, max_blue);
create_image<<<gridSize, blockSize>>>(d_output, d_iColors, global_max, max_red, max_green, max_blue, imageW, imageH);
// render image
//
d_render<<<gridSize, blockSize>>>(d_iColors, cluster_data, d_iRed, d_iGreen, d_iBlue, imageW, imageH, density, brightness,
one, two, three, four, five, six, 1);
max_red = reduce_max(d_oRed, d_iRed, size);
max_green = reduce_max(d_oGreen, d_iGreen, size);
max_blue = reduce_max(d_oBlue, d_iBlue, size);
global_max = fmax(max_red, max_green);
global_max = fmax(global_max, max_blue);
create_image<<<gridSize, blockSize>>>(d_cluster, d_iColors, global_max, max_red, max_green, max_blue, imageW, imageH);
}
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
cutilSafeCall( cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) );
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
|
83e6a3a3a4878b4f4e80af7f2e70a07cd8aad856.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
// This sample uses the compressible memory allocation if device supports it
// and performs saxpy on it.
// Compressible memory may give better performance if the data is amenable to
// compression.
#include <stdio.h>
#include <hip/hip_runtime.h>
#define CUDA_DRIVER_API
#include "helper_cuda.h"
#include "compMalloc.h"
__global__ void saxpy(const float a, const float4 *x, const float4 *y, float4 *z, const size_t n)
{
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
{
const float4 x4 = x[i];
const float4 y4 = y[i];
z[i] = make_float4(a * x4.x + y4.x, a * x4.y + y4.y,
a * x4.z + y4.z, a * x4.w + y4.w);
}
}
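// The loop above is a grid-stride loop: each thread starts at its global index
// and advances by gridDim.x * blockDim.x per iteration, so the grid chosen by
// hipOccupancyMaxPotentialBlockSize() in launchSaxpy() covers all n elements
// even when the grid has fewer threads than elements.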
__global__ void init(float4 *x, float4 *y, float4 *z, const float val, const size_t n)
{
const float4 val4 = make_float4(val, val, val, val);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
{
z[i] = x[i] = y[i] = val4;
}
}
void launchSaxpy(const float a, float4 *x, float4 *y, float4 *z, const size_t n, const float init_val)
{
hipEvent_t start, stop;
float ms;
int blockSize;
int minGridSize;
checkCudaErrors(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)init));
dim3 threads = dim3(blockSize, 1, 1);
dim3 blocks = dim3(minGridSize, 1, 1);
hipLaunchKernelGGL(( init), dim3(blocks), dim3(threads), 0, 0, x, y, z, init_val, n);
checkCudaErrors(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)saxpy));
threads = dim3(blockSize, 1, 1);
blocks = dim3(minGridSize, 1, 1);
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( saxpy), dim3(blocks), dim3(threads), 0, 0, a, x, y, z, n);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&ms, start, stop));
const size_t size = n * sizeof(float4);
printf("Running saxpy with %d blocks x %d threads = %.3f ms %.3f TB/s\n", blocks.x, threads.x, ms, (size*3)/ms/1e9);
}
int main(int argc, char **argv)
{
const size_t n = 10485760;
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
exit(EXIT_SUCCESS);
}
findCudaDevice(argc, (const char**)argv);
hipDevice_t currentDevice;
checkCudaErrors(hipCtxGetDevice(¤tDevice));
// Check that the selected device supports virtual memory management
int vmm_supported = -1;
checkCudaErrors(hipDeviceGetAttribute(&vmm_supported,
CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
currentDevice));
if (vmm_supported == 0) {
printf("Device %d doesn't support Virtual Memory Management, waiving the execution.\n", currentDevice);
exit(EXIT_WAIVED);
}
int isCompressionAvailable;
checkCudaErrors(hipDeviceGetAttribute(&isCompressionAvailable,
CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED,
currentDevice));
if (isCompressionAvailable == 0)
{
printf("Device %d doesn't support Generic memory compression, waiving the execution.\n", currentDevice);
exit(EXIT_WAIVED);
}
printf("Generic memory compression support is available\n");
float4 *x, *y, *z;
const size_t size = n * sizeof(float4);
// Allocating compressible memory
checkCudaErrors(allocateCompressible((void **)&x, size, true));
checkCudaErrors(allocateCompressible((void **)&y, size, true));
checkCudaErrors(allocateCompressible((void **)&z, size, true));
printf("Running saxpy on %zu bytes of Compressible memory\n", size);
const float a = 1.0f;
const float init_val = 1.0f;
launchSaxpy(a, x, y, z, n, init_val);
checkCudaErrors(freeCompressible(x, size, true));
checkCudaErrors(freeCompressible(y, size, true));
checkCudaErrors(freeCompressible(z, size, true));
printf("Running saxpy on %zu bytes of Non-Compressible memory\n", size);
// Allocating non-compressible memory
checkCudaErrors(allocateCompressible((void **)&x, size, false));
checkCudaErrors(allocateCompressible((void **)&y, size, false));
checkCudaErrors(allocateCompressible((void **)&z, size, false));
launchSaxpy(a, x, y, z, n, init_val);
checkCudaErrors(freeCompressible(x, size, false));
checkCudaErrors(freeCompressible(y, size, false));
checkCudaErrors(freeCompressible(z, size, false));
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. "
"Results may vary when GPU Boost is enabled.\n");
return EXIT_SUCCESS;
} | 83e6a3a3a4878b4f4e80af7f2e70a07cd8aad856.cu | /* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
// This sample uses the compressible memory allocation if device supports it
// and performs saxpy on it.
// Compressible memory may give better performance if the data is amenable to
// compression.
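//
// Note: allocateCompressible()/freeCompressible() come from compMalloc.h,
// which is not shown here. As a rough sketch (an assumption about a typical
// implementation, not necessarily what compMalloc.h actually does), such an
// allocator uses the CUDA driver virtual-memory API: fill a CUmemAllocationProp
// with prop.allocFlags.compressionType = CU_MEM_ALLOCATION_COMP_GENERIC, create
// the physical allocation with cuMemCreate(), reserve a VA range with
// cuMemAddressReserve(), back it with cuMemMap(), enable access with
// cuMemSetAccess(), and on free reverse the steps with cuMemUnmap(),
// cuMemAddressFree() and cuMemRelease().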
#include <stdio.h>
#include <cuda.h>
#define CUDA_DRIVER_API
#include "helper_cuda.h"
#include "compMalloc.h"
__global__ void saxpy(const float a, const float4 *x, const float4 *y, float4 *z, const size_t n)
{
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
{
const float4 x4 = x[i];
const float4 y4 = y[i];
z[i] = make_float4(a * x4.x + y4.x, a * x4.y + y4.y,
a * x4.z + y4.z, a * x4.w + y4.w);
}
}
__global__ void init(float4 *x, float4 *y, float4 *z, const float val, const size_t n)
{
const float4 val4 = make_float4(val, val, val, val);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
{
z[i] = x[i] = y[i] = val4;
}
}
void launchSaxpy(const float a, float4 *x, float4 *y, float4 *z, const size_t n, const float init_val)
{
cudaEvent_t start, stop;
float ms;
int blockSize;
int minGridSize;
checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)init));
dim3 threads = dim3(blockSize, 1, 1);
dim3 blocks = dim3(minGridSize, 1, 1);
init<<<blocks, threads>>>(x, y, z, init_val, n);
checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)saxpy));
threads = dim3(blockSize, 1, 1);
blocks = dim3(minGridSize, 1, 1);
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
saxpy<<<blocks, threads>>>(a, x, y, z, n);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&ms, start, stop));
const size_t size = n * sizeof(float4);
printf("Running saxpy with %d blocks x %d threads = %.3f ms %.3f TB/s\n", blocks.x, threads.x, ms, (size*3)/ms/1e9);
}
int main(int argc, char **argv)
{
const size_t n = 10485760;
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
exit(EXIT_SUCCESS);
}
findCudaDevice(argc, (const char**)argv);
CUdevice currentDevice;
checkCudaErrors(cuCtxGetDevice(¤tDevice));
// Check that the selected device supports virtual memory management
int vmm_supported = -1;
checkCudaErrors(cuDeviceGetAttribute(&vmm_supported,
CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
currentDevice));
if (vmm_supported == 0) {
printf("Device %d doesn't support Virtual Memory Management, waiving the execution.\n", currentDevice);
exit(EXIT_WAIVED);
}
int isCompressionAvailable;
checkCudaErrors(cuDeviceGetAttribute(&isCompressionAvailable,
CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED,
currentDevice));
if (isCompressionAvailable == 0)
{
printf("Device %d doesn't support Generic memory compression, waiving the execution.\n", currentDevice);
exit(EXIT_WAIVED);
}
printf("Generic memory compression support is available\n");
float4 *x, *y, *z;
const size_t size = n * sizeof(float4);
// Allocating compressible memory
checkCudaErrors(allocateCompressible((void **)&x, size, true));
checkCudaErrors(allocateCompressible((void **)&y, size, true));
checkCudaErrors(allocateCompressible((void **)&z, size, true));
printf("Running saxpy on %zu bytes of Compressible memory\n", size);
const float a = 1.0f;
const float init_val = 1.0f;
launchSaxpy(a, x, y, z, n, init_val);
checkCudaErrors(freeCompressible(x, size, true));
checkCudaErrors(freeCompressible(y, size, true));
checkCudaErrors(freeCompressible(z, size, true));
printf("Running saxpy on %zu bytes of Non-Compressible memory\n", size);
// Allocating non-compressible memory
checkCudaErrors(allocateCompressible((void **)&x, size, false));
checkCudaErrors(allocateCompressible((void **)&y, size, false));
checkCudaErrors(allocateCompressible((void **)&z, size, false));
launchSaxpy(a, x, y, z, n, init_val);
checkCudaErrors(freeCompressible(x, size, false));
checkCudaErrors(freeCompressible(y, size, false));
checkCudaErrors(freeCompressible(z, size, false));
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. "
"Results may vary when GPU Boost is enabled.\n");
return EXIT_SUCCESS;
} |
ed605410d3fb38acad4c615aa4f855bd2058800b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <string>
//===> FINITE DIFFERENCES PARAMETERS <===//
#define DT 0.05f //->Time in milliseconds
#define DX ( 12.0f / MODELSIZE_X ) //->Displacement in x
#define DY ( 12.0f / MODELSIZE_Y ) //->Displacement in y
//===> CONSTANTES <===//
#define Eh 3.0f
#define En 1.0f
#define Re 0.6f
#define tauE 5.0f
#define tauN 250.0f
#define gam 0.001f
#define East 1.5415f
//===> INITIAL CONDITIONS <===//
#define v0 0.5f
#define VOLT0 3.0f
//==> DISCRETE DOMAIN <==//
#ifndef MODEL_WIDTH
#define MODEL_WIDTH 0 // expected to be set at compile time, e.g. -DMODEL_WIDTH=512; the default of 0 is not usable
#endif
#define MODELSIZE_X (MODEL_WIDTH)
#define MODELSIZE_Y (MODEL_WIDTH)
#define MODELSIZE_Z 1
#define MODELSIZE2D ( MODELSIZE_X*MODELSIZE_Y )
//==> CUDA THREAD BLOCK <==//
//#define TILESIZE 32
//#define BLOCKDIM_X ( TILESIZE )
//#define BLOCKDIM_Y ( TILESIZE )
#ifndef BLOCKDIM_X
#define BLOCKDIM_X 32
#endif
#ifndef BLOCKDIM_Y
#define BLOCKDIM_Y 32
#endif
#define BLOCKDIM_Z 1
#define BLOCKDIM2D ( BLOCKDIM_X*BLOCKDIM_Y )
//==> CUDA GRID <==//
#define GRIDDIM_X ( ( MODELSIZE_X / BLOCKDIM_X ) + ( ( MODELSIZE_X % BLOCKDIM_X ) > 0 ) )
#define GRIDDIM_Y ( ( MODELSIZE_Y / BLOCKDIM_Y ) + ( ( MODELSIZE_Y % BLOCKDIM_Y ) > 0 ) )
#define GRIDDIM_Z 1
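/* Example (assuming the code is compiled with -DMODEL_WIDTH=512 and the default
 * 32x32 thread block): GRIDDIM_X = 512/32 + (512%32 > 0) = 16 + 0 = 16, giving
 * a 16x16 grid of 32x32 blocks over the 512x512 mesh. The +(remainder > 0)
 * term adds a partial block when MODEL_WIDTH is not a multiple of BLOCKDIM_X;
 * note, however, that timeStep() calls __syncthreads() inside its bounds
 * check, so exact multiples of the block size are the safe configuration. */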
//////////////////////////////////////////////////////////////////////////
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//////////////////////////////////////////////////////////////////////////
__global__ void timeStep( const float *voltIN, float *v, float *voltOUT )
{
int x = blockIdx.x*BLOCKDIM_X + threadIdx.x;
int y = blockIdx.y*BLOCKDIM_Y + threadIdx.y;
__shared__ float U[BLOCKDIM_X+2][BLOCKDIM_Y+2];
if ( x < MODELSIZE_X && y < MODELSIZE_Y )
{
//
int idx = y*MODELSIZE_X + x;
int i = threadIdx.x+1;
int j = threadIdx.y+1;
U[i][j] = voltIN[idx];
__syncthreads();
float rv = v[idx];
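// Load the one-cell halo around the shared-memory tile; at domain edges the out-of-range neighbour index is
// mirrored back inside the domain (zero-flux boundary).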
if ( threadIdx.y == 0 )
U[i][0] = voltIN[(idx - ((y>0)-(y==0))*MODELSIZE_X)];
else if ( threadIdx.y == (BLOCKDIM_Y-1) )
U[i][(BLOCKDIM_Y+1)] = voltIN[(idx + ((y<MODELSIZE_Y-1)-(y==MODELSIZE_Y-1))*MODELSIZE_X)];
if ( threadIdx.x == 0 )
U[0][j] = voltIN[(idx - (x>0) + (x==0))];
else if ( threadIdx.x == (BLOCKDIM_X-1) )
U[(BLOCKDIM_X+1)][j] = voltIN[(idx + (x<MODELSIZE_X-1)-(x==MODELSIZE_X-1))];
float Rn = ( 1.0f / ( 1.0f - expf(-Re) ) ) - rv;
float p = ( U[i][j] > En ) * 1.0f;
float dv = ( Rn * p - ( 1.0f - p ) * rv ) / tauN;
float Dn = rv * rv;
float hE = ( 1.0f - tanhf(U[i][j] - Eh) ) * U[i][j] * U[i][j] / 2.0f;
float du = ( ( ( East - Dn ) * hE ) - U[i][j] ) / tauE;
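// 5-point Laplacian assembled from one-sided differences: lap = U[i+1][j] + U[i-1][j] + U[i][j+1] + U[i][j-1] - 4*U[i][j]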
float xlapr = U[i+1][j] - U[i][j];
float xlapl = U[i][j] - U[i-1][j];
float xlapf = U[i][j+1] - U[i][j];
float xlapb = U[i][j] - U[i][j-1];
float lap = xlapr - xlapl + xlapf - xlapb;
voltOUT[idx] = ( U[i][j] + ( du * DT ) + ( lap * DT * gam / ( DX * DX ) ) );
v[idx] = rv + dv*DT;
}
}
int main( int argc, char *argv[] )
{
int nsteps = 3; //8000;
// if ( argc > 1 )
// {
// char *p;
// long conv = strtol(argv[1], &p, 10);
// //
// // Check for errors: e.g., the string does not represent an integer
// // or the integer is larger than int
// if (*p != '\0' || conv > INT_MAX)
// {
// printf("Error with argument 1!");
// return 3;
// }
// else
// nsteps = int(conv/DT);
// }
if (argc > 1)
{
nsteps = atoi(argv[1]);
}
//
hipEvent_t dstart,dstop;
hipEventCreate( &dstart );
hipEventCreate( &dstop );
//
long start, end;
struct timeval timecheck;
gettimeofday(&timecheck, NULL);
start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
//
float *hvolt, *hv;
hvolt = (float*) malloc( MODELSIZE2D*sizeof(float) );
hv = (float*) malloc( MODELSIZE2D*sizeof(float) );
// int x, y, idx;
// for( y = 0; y < MODELSIZE_Y; y++ )
// {
// for( x = 0; x < MODELSIZE_X; x++ )
// {
// idx = y*MODELSIZE_X + x;
// //
// hv[idx] = 0.5f;
// //
// if ( y < 10*(MODELSIZE_Y/20) && y > 8*(MODELSIZE_Y/20) && x < 10*(MODELSIZE_Y/20) && x > 8*(MODELSIZE_Y/20))
// hvolt[idx] = VOLT0;
// else
// hvolt[idx] = 0.0f;
// //
// }
// }
FILE *arq;
arq = fopen("entrada.txt", "rt");
for(int i=0;i<MODELSIZE_X;i++)
for(int j=0;j<MODELSIZE_Y;j++)
{
hv[i+j*MODELSIZE_X] = 0.5f;
int temp;
fscanf(arq," %d",&temp);
hvolt[i+j*MODELSIZE_X] = temp;
}
fclose(arq);
// FILE *prof;
// char fpname[100];
// sprintf(fpname, "./profiles_%d_k2D_shared.csv",MODELSIZE_X);
// prof = fopen(fpname,"w");
// fprintf(prof,"index,timestep,P\n");
// fprintf(prof,"0,%6.4f",0.0);
// fclose(prof);
dim3 point;
//int pointIdx;
point.x = MODELSIZE_X/2;
point.y = MODELSIZE_Y/2;
point.z = 0;
// pointIdx = point.y*MODELSIZE_X + point.x;
//fprintf(prof,",%6.4f\n",hvolt[pointIdx]);
float *dvoltA, *dvoltB, *dv;
HANDLE_ERROR( hipMalloc( (void**)&dvoltA, MODELSIZE2D*sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dvoltB, MODELSIZE2D*sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**)&dv , MODELSIZE2D*sizeof(float) ) );
HANDLE_ERROR( hipMemcpy( dvoltA, hvolt, MODELSIZE2D*sizeof(float), hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dvoltB, hvolt, MODELSIZE2D*sizeof(float), hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dv , hv , MODELSIZE2D*sizeof(float), hipMemcpyHostToDevice ) );
free( hv );
dim3 blocks(GRIDDIM_X,GRIDDIM_Y,GRIDDIM_Z);
dim3 threads(BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z);
//int nsamples = (nsteps >= 2000)*2000 + (nsteps < 2000)*nsteps;
//int j = nsteps/nsamples;
hipDeviceSynchronize();
hipEventRecord( dstart, 0 );
int i=0;
for (i = 0; i < nsteps; i++ )
{
if ( (i%2) == 0 ) //==> EVEN
hipLaunchKernelGGL(( timeStep), dim3(blocks), dim3(threads), 0, 0, dvoltA, dv, dvoltB );
else //==> ODD
hipLaunchKernelGGL(( timeStep), dim3(blocks), dim3(threads), 0, 0, dvoltB, dv, dvoltA );
//
/*if ( (i%j) == 0 ) {
if ( (i%2) == 0 ) //==> EVEN
HANDLE_ERROR( hipMemcpy( hvolt, dvoltB, MODELSIZE3D*sizeof(float), hipMemcpyDeviceToHost ) );
else //==> ODD
HANDLE_ERROR( hipMemcpy( hvolt, dvoltA, MODELSIZE3D*sizeof(float), hipMemcpyDeviceToHost ) );
//
fprintf(prof,"%d,%6.4f,%6.4f\n", (i+1), ((i+1)*DT), hvolt[pointIdx]);
}*/
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", hipGetErrorString(err));
}
}
hipDeviceSynchronize();
hipEventRecord( dstop, 0 );
hipEventSynchronize ( dstop );
float elapsed;
hipEventElapsedTime( &elapsed, dstart, dstop );
//printf("GPU elapsed time: %f s (%f milliseconds)\n", (elapsed/1000.0), elapsed);
//arq = fopen("TempoExecucaoOrig12000.txt", "a");
//printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
//fprintf (arq,"[%d,%.5f],\n",MODEL_WIDTH,elapsed);
printf ("[%d,%.5f]",0,elapsed);
//fclose(arq);
// if ( (i%2) == 0 )
// HANDLE_ERROR( hipMemcpy( hvolt, dvoltA, MODELSIZE2D*sizeof(float), hipMemcpyDeviceToHost ) );
// else
// HANDLE_ERROR( hipMemcpy( hvolt, dvoltB, MODELSIZE2D*sizeof(float), hipMemcpyDeviceToHost ) );
// arq = fopen("resultado.txt", "wt");
// for(int i=0;i<MODELSIZE_X;i++)
// {
// for(int j=0;j<MODELSIZE_Y;j++)
// {
// fprintf(arq," %6.4f",hvolt[i+j*MODELSIZE_X]);
// }
// fprintf(arq,"\n");
// }
// fclose(arq);
//fclose( prof );
free( hvolt );
hipFree( dvoltA );
hipFree( dvoltB );
hipFree( dv );
//
// hipDeviceSynchronize();
// gettimeofday(&timecheck, NULL);
// end = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
//printf("CPU elapsed time: %f s (%ld milliseconds)\n", ((end - start)/1000.0), (end - start));
//
hipEventDestroy( dstart );
hipEventDestroy( dstop );
hipDeviceReset();
//
return 0;
}
| ed605410d3fb38acad4c615aa4f855bd2058800b.cu | #include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <string>
//===> FINITE DIFFERENCES PARAMETERS <===//
#define DT 0.05f //->Time in milliseconds
#define DX ( 12.0f / MODELSIZE_X ) //->Displacement in x
#define DY ( 12.0f / MODELSIZE_Y ) //->Displacement in y
//===> CONSTANTS <===//
#define Eh 3.0f
#define En 1.0f
#define Re 0.6f
#define tauE 5.0f
#define tauN 250.0f
#define gam 0.001f
#define East 1.5415f
//===> INITIAL CONDITIONS <===//
#define v0 0.5f
#define VOLT0 3.0f
//==> DISCRETE DOMAIN <==//
#ifndef MODEL_WIDTH
#define MODEL_WIDTH 0
#endif
#define MODELSIZE_X (MODEL_WIDTH)
#define MODELSIZE_Y (MODEL_WIDTH)
#define MODELSIZE_Z 1
#define MODELSIZE2D ( MODELSIZE_X*MODELSIZE_Y )
//==> CUDA THREAD BLOCK <==//
//#define TILESIZE 32
//#define BLOCKDIM_X ( TILESIZE )
//#define BLOCKDIM_Y ( TILESIZE )
#ifndef BLOCKDIM_X
#define BLOCKDIM_X 32
#endif
#ifndef BLOCKDIM_Y
#define BLOCKDIM_Y 32
#endif
#define BLOCKDIM_Z 1
#define BLOCKDIM2D ( BLOCKDIM_X*BLOCKDIM_Y )
//==> CUDA GRID <==//
#define GRIDDIM_X ( ( MODELSIZE_X / BLOCKDIM_X ) + ( ( MODELSIZE_X % BLOCKDIM_X ) > 0 ) )
#define GRIDDIM_Y ( ( MODELSIZE_Y / BLOCKDIM_Y ) + ( ( MODELSIZE_Y % BLOCKDIM_Y ) > 0 ) )
#define GRIDDIM_Z 1
//////////////////////////////////////////////////////////////////////////
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//////////////////////////////////////////////////////////////////////////
__global__ void timeStep( const float *voltIN, float *v, float *voltOUT )
{
int x = blockIdx.x*BLOCKDIM_X + threadIdx.x;
int y = blockIdx.y*BLOCKDIM_Y + threadIdx.y;
__shared__ float U[BLOCKDIM_X+2][BLOCKDIM_Y+2];
if ( x < MODELSIZE_X && y < MODELSIZE_Y )
{
//
int idx = y*MODELSIZE_X + x;
int i = threadIdx.x+1;
int j = threadIdx.y+1;
U[i][j] = voltIN[idx];
__syncthreads();
float rv = v[idx];
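// Load the one-cell halo around the shared-memory tile; at domain edges the out-of-range neighbour index is
// mirrored back inside the domain (zero-flux boundary).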
if ( threadIdx.y == 0 )
U[i][0] = voltIN[(idx - ((y>0)-(y==0))*MODELSIZE_X)];
else if ( threadIdx.y == (BLOCKDIM_Y-1) )
U[i][(BLOCKDIM_Y+1)] = voltIN[(idx + ((y<MODELSIZE_Y-1)-(y==MODELSIZE_Y-1))*MODELSIZE_X)];
if ( threadIdx.x == 0 )
U[0][j] = voltIN[(idx - (x>0) + (x==0))];
else if ( threadIdx.x == (BLOCKDIM_X-1) )
U[(BLOCKDIM_X+1)][j] = voltIN[(idx + (x<MODELSIZE_X-1)-(x==MODELSIZE_X-1))];
float Rn = ( 1.0f / ( 1.0f - expf(-Re) ) ) - rv;
float p = ( U[i][j] > En ) * 1.0f;
float dv = ( Rn * p - ( 1.0f - p ) * rv ) / tauN;
float Dn = rv * rv;
float hE = ( 1.0f - tanhf(U[i][j] - Eh) ) * U[i][j] * U[i][j] / 2.0f;
float du = ( ( ( East - Dn ) * hE ) - U[i][j] ) / tauE;
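// 5-point Laplacian assembled from one-sided differences: lap = U[i+1][j] + U[i-1][j] + U[i][j+1] + U[i][j-1] - 4*U[i][j]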
float xlapr = U[i+1][j] - U[i][j];
float xlapl = U[i][j] - U[i-1][j];
float xlapf = U[i][j+1] - U[i][j];
float xlapb = U[i][j] - U[i][j-1];
float lap = xlapr - xlapl + xlapf - xlapb;
voltOUT[idx] = ( U[i][j] + ( du * DT ) + ( lap * DT * gam / ( DX * DX ) ) );
v[idx] = rv + dv*DT;
}
}
int main( int argc, char *argv[] )
{
int nsteps = 3; //8000;
// if ( argc > 1 )
// {
// char *p;
// long conv = strtol(argv[1], &p, 10);
// //
// // Check for errors: e.g., the string does not represent an integer
// // or the integer is larger than int
// if (*p != '\0' || conv > INT_MAX)
// {
// printf("Error with argument 1!");
// return 3;
// }
// else
// nsteps = int(conv/DT);
// }
if (argc > 1)
{
nsteps = atoi(argv[1]);
}
//
cudaEvent_t dstart,dstop;
cudaEventCreate( &dstart );
cudaEventCreate( &dstop );
//
long start, end;
struct timeval timecheck;
gettimeofday(&timecheck, NULL);
start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
//
float *hvolt, *hv;
hvolt = (float*) malloc( MODELSIZE2D*sizeof(float) );
hv = (float*) malloc( MODELSIZE2D*sizeof(float) );
// int x, y, idx;
// for( y = 0; y < MODELSIZE_Y; y++ )
// {
// for( x = 0; x < MODELSIZE_X; x++ )
// {
// idx = y*MODELSIZE_X + x;
// //
// hv[idx] = 0.5f;
// //
// if ( y < 10*(MODELSIZE_Y/20) && y > 8*(MODELSIZE_Y/20) && x < 10*(MODELSIZE_Y/20) && x > 8*(MODELSIZE_Y/20))
// hvolt[idx] = VOLT0;
// else
// hvolt[idx] = 0.0f;
// //
// }
// }
FILE *arq;
arq = fopen("entrada.txt", "rt");
for(int i=0;i<MODELSIZE_X;i++)
for(int j=0;j<MODELSIZE_Y;j++)
{
hv[i+j*MODELSIZE_X] = 0.5f;
int temp;
fscanf(arq," %d",&temp);
hvolt[i+j*MODELSIZE_X] = temp;
}
fclose(arq);
// FILE *prof;
// char fpname[100];
// sprintf(fpname, "./profiles_%d_k2D_shared.csv",MODELSIZE_X);
// prof = fopen(fpname,"w");
// fprintf(prof,"index,timestep,P\n");
// fprintf(prof,"0,%6.4f",0.0);
// fclose(prof);
dim3 point;
//int pointIdx;
point.x = MODELSIZE_X/2;
point.y = MODELSIZE_Y/2;
point.z = 0;
// pointIdx = point.y*MODELSIZE_X + point.x;
//fprintf(prof,",%6.4f\n",hvolt[pointIdx]);
float *dvoltA, *dvoltB, *dv;
HANDLE_ERROR( cudaMalloc( (void**)&dvoltA, MODELSIZE2D*sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dvoltB, MODELSIZE2D*sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dv , MODELSIZE2D*sizeof(float) ) );
HANDLE_ERROR( cudaMemcpy( dvoltA, hvolt, MODELSIZE2D*sizeof(float), cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dvoltB, hvolt, MODELSIZE2D*sizeof(float), cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dv , hv , MODELSIZE2D*sizeof(float), cudaMemcpyHostToDevice ) );
free( hv );
dim3 blocks(GRIDDIM_X,GRIDDIM_Y,GRIDDIM_Z);
dim3 threads(BLOCKDIM_X,BLOCKDIM_Y,BLOCKDIM_Z);
//int nsamples = (nsteps >= 2000)*2000 + (nsteps < 2000)*nsteps;
//int j = nsteps/nsamples;
cudaDeviceSynchronize();
cudaEventRecord( dstart, 0 );
int i=0;
for (i = 0; i < nsteps; i++ )
{
if ( (i%2) == 0 ) //==> EVEN
timeStep<<<blocks, threads>>>( dvoltA, dv, dvoltB );
else //==> ODD
timeStep<<<blocks, threads>>>( dvoltB, dv, dvoltA );
//
/*if ( (i%j) == 0 ) {
if ( (i%2) == 0 ) //==> EVEN
HANDLE_ERROR( cudaMemcpy( hvolt, dvoltB, MODELSIZE3D*sizeof(float), cudaMemcpyDeviceToHost ) );
else //==> ODD
HANDLE_ERROR( cudaMemcpy( hvolt, dvoltA, MODELSIZE3D*sizeof(float), cudaMemcpyDeviceToHost ) );
//
fprintf(prof,"%d,%6.4f,%6.4f\n", (i+1), ((i+1)*DT), hvolt[pointIdx]);
}*/
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
}
}
cudaDeviceSynchronize();
cudaEventRecord( dstop, 0 );
cudaEventSynchronize ( dstop );
float elapsed;
cudaEventElapsedTime( &elapsed, dstart, dstop );
//printf("GPU elapsed time: %f s (%f milliseconds)\n", (elapsed/1000.0), elapsed);
//arq = fopen("TempoExecucaoOrig12000.txt", "a");
//printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
//fprintf (arq,"[%d,%.5f],\n",MODEL_WIDTH,elapsed);
printf ("[%d,%.5f]",0,elapsed);
//fclose(arq);
// if ( (i%2) == 0 )
// HANDLE_ERROR( cudaMemcpy( hvolt, dvoltA, MODELSIZE2D*sizeof(float), cudaMemcpyDeviceToHost ) );
// else
// HANDLE_ERROR( cudaMemcpy( hvolt, dvoltB, MODELSIZE2D*sizeof(float), cudaMemcpyDeviceToHost ) );
// arq = fopen("resultado.txt", "wt");
// for(int i=0;i<MODELSIZE_X;i++)
// {
// for(int j=0;j<MODELSIZE_Y;j++)
// {
// fprintf(arq," %6.4f",hvolt[i+j*MODELSIZE_X]);
// }
// fprintf(arq,"\n");
// }
// fclose(arq);
//fclose( prof );
free( hvolt );
cudaFree( dvoltA );
cudaFree( dvoltB );
cudaFree( dv );
//
// cudaDeviceSynchronize();
// gettimeofday(&timecheck, NULL);
// end = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
//printf("CPU elapsed time: %f s (%ld milliseconds)\n", ((end - start)/1000.0), (end - start));
//
cudaEventDestroy( dstart );
cudaEventDestroy( dstop );
cudaDeviceReset();
//
return 0;
}
|
6f17adde5485d766e034936cd01a3b8720ca7fb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <iostream>
#ifdef __cplusplus
extern "C" {
#endif
#include <float.h>
#include <stdio.h>
#include "highway_lstm_kernel.h"
#define BLOCK 256
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
// Device functions
__forceinline__ __device__ float sigmoidf(float in) {
return 1.f / (1.f + expf(-in));
}
__forceinline__ __device__ float dsigmoidf(float in) {
float s = sigmoidf(in);
return s * (1.f - s);
}
__forceinline__ __device__ float tanh2f(float in) {
float t = tanhf(in);
return t*t;
}
__global__ void elementWise_bp(int hiddenSize, int miniBatch, int numCovered,
// Inputs
float *out_grad,
float *h_out_grad,
float *c_out_grad,
float *c_in,
float *c_out,
float *h_out,
float *gates_out,
float *dropout_in,
// Outputs
float *c_in_grad,
float *i_gates_grad,
float *h_gates_grad,
int training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numCovered * hiddenSize) return;
int batch = index / hiddenSize;
int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize;
int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize;
float d_h = out_grad[index] + h_out_grad[index];
d_h = d_h * dropout_in[index];
float in_gate = gates_out[i_gateIndex];
float forget_gate = gates_out[i_gateIndex + 1 * hiddenSize];
float act_gate = gates_out[i_gateIndex + 2 * hiddenSize];
float out_gate = gates_out[i_gateIndex + 3 * hiddenSize];
float r_gate = gates_out[i_gateIndex + 4 * hiddenSize];
float lin_gate = gates_out[i_gateIndex + 5 * hiddenSize];
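// Mirror of elementWise_fp: first undo the highway mix, then backpropagate through the LSTM cell update.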
float d_out = d_h * r_gate;
float d_c = d_out * out_gate * (1.f - tanh2f(c_out[index])) + c_out_grad[index];
float h_prime = out_gate * tanhf(c_out[index]);
float d_in_gate = d_c * act_gate * in_gate * (1.f - in_gate);
float d_forget_gate = d_c * c_in[index] * forget_gate * (1.f - forget_gate);
float d_act_gate = d_c * in_gate * (1.f - act_gate * act_gate);
float d_out_gate = d_out * tanhf(c_out[index]) * out_gate * (1.f - out_gate);
float d_r_gate = d_h * (h_prime - lin_gate) * r_gate * (1.f - r_gate);
float d_lin_gate = d_h * (1 - r_gate);
i_gates_grad[i_gateIndex] = d_in_gate;
i_gates_grad[i_gateIndex + 1 * hiddenSize] = d_forget_gate;
i_gates_grad[i_gateIndex + 2 * hiddenSize] = d_act_gate;
i_gates_grad[i_gateIndex + 3 * hiddenSize] = d_out_gate;
i_gates_grad[i_gateIndex + 4 * hiddenSize] = d_r_gate;
i_gates_grad[i_gateIndex + 5 * hiddenSize] = d_lin_gate;
h_gates_grad[h_gateIndex] = d_in_gate;
h_gates_grad[h_gateIndex + 1 * hiddenSize] = d_forget_gate;
h_gates_grad[h_gateIndex + 2 * hiddenSize] = d_act_gate;
h_gates_grad[h_gateIndex + 3 * hiddenSize] = d_out_gate;
h_gates_grad[h_gateIndex + 4 * hiddenSize] = d_r_gate;
c_in_grad[index] = forget_gate * d_c;
}
// Fused forward kernel
__global__ void elementWise_fp(int hiddenSize, int miniBatch, int numCovered,
float *tmp_h,
float *tmp_i,
float *bias,
float *linearGates,
float *h_out,
float *dropout_in,
float *c_in,
float *c_out,
int training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numCovered * hiddenSize) return;
int batch = index / hiddenSize;
int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize;
int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize;
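// The recurrent (h) path supplies 5 gate pre-activations per cell while the input (i) path supplies 6
// (the extra one is the highway carry), hence the 5x / 6x per-batch strides.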
float g[6];
for (int i = 0; i < 5; i++) {
g[i] = tmp_i[i * hiddenSize + i_gateIndex] + tmp_h[i * hiddenSize + h_gateIndex];
g[i] += bias[i * hiddenSize + index % hiddenSize];
}
// extra for highway
g[5] = tmp_i[5 * hiddenSize + i_gateIndex];
float in_gate = sigmoidf(g[0]);
float forget_gate = sigmoidf(g[1]);
float act_gate = tanhf(g[2]);
float out_gate = sigmoidf(g[3]);
float r_gate = sigmoidf(g[4]);
float lin_gate = g[5];
if (training == 1) {
linearGates[i_gateIndex] = in_gate;
linearGates[i_gateIndex + 1 * hiddenSize] = forget_gate;
linearGates[i_gateIndex + 2 * hiddenSize] = act_gate;
linearGates[i_gateIndex + 3 * hiddenSize] = out_gate;
linearGates[i_gateIndex + 4 * hiddenSize] = r_gate;
linearGates[i_gateIndex + 5 * hiddenSize] = lin_gate;
}
float val = (forget_gate * c_in[index]) + (in_gate * act_gate);
c_out[index] = val;
val = out_gate * tanhf(val);
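// Highway connection: r_gate mixes the LSTM output with the linear carry lin_gate.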
val = val * r_gate + (1.f - r_gate) * lin_gate;
val = val * dropout_in[index];
h_out[index] = val;
}
void highway_lstm_backward_ongpu(int inputSize, int hiddenSize, int miniBatch,
int numLayers, int seqLength, float *out_grad, int *lengths,
float *h_data_grad, float * c_data_grad, float *x, float *h_data,
float *c_data, float *T,
float *gates_out, float *dropout_in, float *h_gates_grad,
float *i_gates_grad, float *h_out_grad, float *x_grad, float *T_grad, float *bias_grad,
int isTraining, int do_weight_grad, hipStream_t stream, hipblasHandle_t handle) {
const int numElements = hiddenSize * miniBatch;
hipStream_t stream_i;
hipStream_t stream_h;
hipStream_t stream_wi;
hipStream_t stream_wh;
hipStream_t stream_wb;
cudaErrCheck(hipStreamCreate(&stream_i));
cudaErrCheck(hipStreamCreate(&stream_h));
cudaErrCheck(hipStreamCreate(&stream_wi));
cudaErrCheck(hipStreamCreate(&stream_wh));
cudaErrCheck(hipStreamCreate(&stream_wb));
float one = 1.f;
float zero = 0.f;
float *ones_host = new float[miniBatch];
for (int i=0; i < miniBatch; i++) {
ones_host[i] = 1.f;
}
float *ones;
cudaErrCheck(hipMalloc((void**)&ones, miniBatch * sizeof(float)));
cudaErrCheck(hipMemcpy(ones, ones_host, miniBatch * sizeof(float), hipMemcpyHostToDevice));
for (int layer = numLayers-1; layer >= 0; layer--) {
int direction;
int startInd;
int currNumCovered;
if (layer % 2 == 0) {
// forward direction
direction = -1;
startInd = seqLength-1;
currNumCovered = 0;
} else {
// backward direction
direction = 1;
startInd = 0;
currNumCovered = miniBatch;
}
for (int t = startInd; t < seqLength && t >= 0; t = t + direction) {
int prevIndex;
int prevGradIndex;
if (direction == 1) {
while (lengths[currNumCovered-1] <= t) {
currNumCovered--;
}
prevGradIndex = t;
prevIndex = (t+2)%(seqLength+1);
} else {
while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) {
currNumCovered++;
}
prevGradIndex = (t+2)%(seqLength+1);
prevIndex = t;
}
float * gradPtr;
if (layer == numLayers-1) {
gradPtr = out_grad + t * numElements;
} else {
gradPtr = h_out_grad + t * numElements + layer * seqLength * numElements;
}
cublasErrCheck(hipblasSetStream(handle, stream_i));
dim3 blockDim;
dim3 gridDim;
blockDim.x = BLOCK;
gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x;
hipLaunchKernelGGL(( elementWise_bp) , dim3(gridDim), dim3(blockDim) , 0, stream,
hiddenSize, miniBatch, currNumCovered,
gradPtr,
h_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements,
c_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + (t+1) * numElements + layer * (seqLength + 1) * numElements,
h_data + (t+1) * numElements + layer * (seqLength + 1) * numElements,
gates_out + t * 6 * numElements + layer * seqLength * 6 * numElements,
dropout_in + layer * numElements,
c_data_grad + (t+1) * numElements + layer * (seqLength + 1) * numElements,
i_gates_grad,
h_gates_grad,
isTraining);
cudaErrCheck(hipGetLastError());
// END
cudaErrCheck(hipDeviceSynchronize());
float *out_grad_ptr;
int weightStart;
int inSize;
if (layer == 0) {
inSize = inputSize;
out_grad_ptr = x_grad + t * inputSize * miniBatch;
weightStart = 0;
} else {
inSize = hiddenSize;
out_grad_ptr = h_out_grad + t * numElements + (layer-1) * seqLength * numElements;
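// Layout of T: layer 0 holds 6H x inputSize input weights followed by 5H x H recurrent weights;
// every later layer holds 6H x H input plus 5H x H recurrent weights (11*H*H in total).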
weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize;
}
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
inSize, currNumCovered, 6*hiddenSize,
&one,
&T[weightStart],
6 * hiddenSize,
i_gates_grad,
6 * hiddenSize,
&zero,
out_grad_ptr,
inSize));
cublasErrCheck(hipblasSetStream(handle, stream_h));
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
hiddenSize, currNumCovered, 5*hiddenSize,
&one,
&T[weightStart + 6*hiddenSize*inSize],
5 * hiddenSize,
h_gates_grad,
5 * hiddenSize,
&zero,
h_data_grad + (t+1) * numElements + layer * (seqLength+1) * numElements,
hiddenSize));
if (do_weight_grad == 1) {
float *inputPtr;
if (layer == 0) {
inputPtr = x + t * inputSize * miniBatch;
} else {
inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements;
}
cublasErrCheck(hipblasSetStream(handle, stream_wi));
// Update i_weights
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
6 * hiddenSize, inSize, currNumCovered,
&one,
i_gates_grad,
6 * hiddenSize,
inputPtr,
inSize,
&one,
&T_grad[weightStart],
6 * hiddenSize));
cublasErrCheck(hipblasSetStream(handle, stream_wh));
// Update h_weights
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
5 * hiddenSize, hiddenSize, currNumCovered,
&one,
h_gates_grad,
5 * hiddenSize,
h_data + prevIndex * numElements + layer * (seqLength+1) * numElements,
hiddenSize,
&one,
&T_grad[weightStart + 6 *hiddenSize*inSize],
5 * hiddenSize));
cublasErrCheck(hipblasSetStream(handle, stream_wb));
// Update bias_weights
cublasErrCheck(hipblasSgemv(handle,
HIPBLAS_OP_N,
5 * hiddenSize, currNumCovered,
&one,
h_gates_grad,
5 * hiddenSize,
ones,
1,
&one,
&bias_grad[layer * 5 * hiddenSize],
1));
}
cudaErrCheck(hipDeviceSynchronize());
}
}
cublasErrCheck(hipblasSetStream(handle, stream));
cudaErrCheck(hipStreamDestroy(stream_i));
cudaErrCheck(hipStreamDestroy(stream_h));
cudaErrCheck(hipStreamDestroy(stream_wi));
cudaErrCheck(hipStreamDestroy(stream_wh));
cudaErrCheck(hipStreamDestroy(stream_wb));
cudaErrCheck(hipFree(ones));
delete [] ones_host;
cudaErrCheck(hipDeviceSynchronize());
}
void highway_lstm_forward_ongpu(int inputSize, int hiddenSize, int miniBatch,
int numLayers, int seqLength, float *x, int *lengths, float *h_data,
float *c_data, float *tmp_i, float *tmp_h, float *T, float *bias,
float *dropout, float *gates, int is_training, hipStream_t stream, hipblasHandle_t handle) {
const int numElements = hiddenSize * miniBatch;
float zero = 0.f;
float one = 1.f;
hipStream_t stream_i;
hipStream_t stream_h;
cudaErrCheck(hipStreamCreate(&stream_i));
cudaErrCheck(hipStreamCreate(&stream_h));
for (int layer = 0; layer < numLayers; layer++) {
int direction;
int startInd;
int currNumCovered;
if (layer % 2 == 0) {
// forward direction
direction = 1;
startInd = 0;
currNumCovered = miniBatch;
} else {
// backward direction
direction = -1;
startInd = seqLength-1;
currNumCovered = 0;
}
cublasErrCheck(hipblasSetStream(handle, stream));
for (int t = startInd; t < seqLength && t >= 0; t = t + direction) {
int prevIndex;
if (direction == 1) {
while (lengths[currNumCovered-1] <= t) {
currNumCovered--;
}
prevIndex = t;
} else {
while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) {
currNumCovered++;
}
prevIndex = (t+2)%(seqLength+1);
}
int inSize;
int weightStart;
float *inputPtr;
if (layer == 0) {
inSize = inputSize;
weightStart = 0;
inputPtr = x + t * inputSize * miniBatch;
prevIndex = t;
} else {
inSize = hiddenSize;
weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize;
inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements;
}
cublasErrCheck(hipblasSetStream(handle, stream_i));
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
6*hiddenSize, currNumCovered, inSize,
&one,
&T[weightStart],
6 * hiddenSize,
inputPtr,
inSize,
&zero,
tmp_i,
6 * hiddenSize));
cublasErrCheck(hipblasSetStream(handle, stream_h));
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
5*hiddenSize, currNumCovered, hiddenSize,
&one,
&T[6 * hiddenSize * inSize + weightStart],
5 * hiddenSize,
h_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
hiddenSize,
&zero,
tmp_h,
5 * hiddenSize));
cudaErrCheck(hipDeviceSynchronize());
dim3 blockDim;
dim3 gridDim;
blockDim.x = BLOCK;
gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x;
hipLaunchKernelGGL(( elementWise_fp) , dim3(gridDim), dim3(blockDim) , 0, stream,
hiddenSize, miniBatch, currNumCovered,
tmp_h,
tmp_i,
bias + 5 * layer * hiddenSize,
is_training ? gates + 6 * (t * numElements + layer * seqLength * numElements) : NULL,
h_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements,
dropout + layer * numElements,
c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements,
is_training);
cudaErrCheck(hipGetLastError());
cudaErrCheck(hipDeviceSynchronize());
}
}
cublasErrCheck(hipblasSetStream(handle, stream));
cudaErrCheck(hipStreamDestroy(stream_i));
cudaErrCheck(hipStreamDestroy(stream_h));
cudaErrCheck(hipDeviceSynchronize());
}
#ifdef __cplusplus
}
#endif
| 6f17adde5485d766e034936cd01a3b8720ca7fb5.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <iostream>
#ifdef __cplusplus
extern "C" {
#endif
#include <float.h>
#include <stdio.h>
#include "highway_lstm_kernel.h"
#define BLOCK 256
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
// Device functions
__forceinline__ __device__ float sigmoidf(float in) {
return 1.f / (1.f + expf(-in));
}
__forceinline__ __device__ float dsigmoidf(float in) {
float s = sigmoidf(in);
return s * (1.f - s);
}
__forceinline__ __device__ float tanh2f(float in) {
float t = tanhf(in);
return t*t;
}
__global__ void elementWise_bp(int hiddenSize, int miniBatch, int numCovered,
// Inputs
float *out_grad,
float *h_out_grad,
float *c_out_grad,
float *c_in,
float *c_out,
float *h_out,
float *gates_out,
float *dropout_in,
// Outputs
float *c_in_grad,
float *i_gates_grad,
float *h_gates_grad,
int training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numCovered * hiddenSize) return;
int batch = index / hiddenSize;
int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize;
int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize;
float d_h = out_grad[index] + h_out_grad[index];
d_h = d_h * dropout_in[index];
float in_gate = gates_out[i_gateIndex];
float forget_gate = gates_out[i_gateIndex + 1 * hiddenSize];
float act_gate = gates_out[i_gateIndex + 2 * hiddenSize];
float out_gate = gates_out[i_gateIndex + 3 * hiddenSize];
float r_gate = gates_out[i_gateIndex + 4 * hiddenSize];
float lin_gate = gates_out[i_gateIndex + 5 * hiddenSize];
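// Mirror of elementWise_fp: first undo the highway mix, then backpropagate through the LSTM cell update.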
float d_out = d_h * r_gate;
float d_c = d_out * out_gate * (1.f - tanh2f(c_out[index])) + c_out_grad[index];
float h_prime = out_gate * tanhf(c_out[index]);
float d_in_gate = d_c * act_gate * in_gate * (1.f - in_gate);
float d_forget_gate = d_c * c_in[index] * forget_gate * (1.f - forget_gate);
float d_act_gate = d_c * in_gate * (1.f - act_gate * act_gate);
float d_out_gate = d_out * tanhf(c_out[index]) * out_gate * (1.f - out_gate);
float d_r_gate = d_h * (h_prime - lin_gate) * r_gate * (1.f - r_gate);
float d_lin_gate = d_h * (1 - r_gate);
i_gates_grad[i_gateIndex] = d_in_gate;
i_gates_grad[i_gateIndex + 1 * hiddenSize] = d_forget_gate;
i_gates_grad[i_gateIndex + 2 * hiddenSize] = d_act_gate;
i_gates_grad[i_gateIndex + 3 * hiddenSize] = d_out_gate;
i_gates_grad[i_gateIndex + 4 * hiddenSize] = d_r_gate;
i_gates_grad[i_gateIndex + 5 * hiddenSize] = d_lin_gate;
h_gates_grad[h_gateIndex] = d_in_gate;
h_gates_grad[h_gateIndex + 1 * hiddenSize] = d_forget_gate;
h_gates_grad[h_gateIndex + 2 * hiddenSize] = d_act_gate;
h_gates_grad[h_gateIndex + 3 * hiddenSize] = d_out_gate;
h_gates_grad[h_gateIndex + 4 * hiddenSize] = d_r_gate;
c_in_grad[index] = forget_gate * d_c;
}
// Fused forward kernel
__global__ void elementWise_fp(int hiddenSize, int miniBatch, int numCovered,
float *tmp_h,
float *tmp_i,
float *bias,
float *linearGates,
float *h_out,
float *dropout_in,
float *c_in,
float *c_out,
int training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numCovered * hiddenSize) return;
int batch = index / hiddenSize;
int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize;
int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize;
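// The recurrent (h) path supplies 5 gate pre-activations per cell while the input (i) path supplies 6
// (the extra one is the highway carry), hence the 5x / 6x per-batch strides.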
float g[6];
for (int i = 0; i < 5; i++) {
g[i] = tmp_i[i * hiddenSize + i_gateIndex] + tmp_h[i * hiddenSize + h_gateIndex];
g[i] += bias[i * hiddenSize + index % hiddenSize];
}
// extra for highway
g[5] = tmp_i[5 * hiddenSize + i_gateIndex];
float in_gate = sigmoidf(g[0]);
float forget_gate = sigmoidf(g[1]);
float act_gate = tanhf(g[2]);
float out_gate = sigmoidf(g[3]);
float r_gate = sigmoidf(g[4]);
float lin_gate = g[5];
if (training == 1) {
linearGates[i_gateIndex] = in_gate;
linearGates[i_gateIndex + 1 * hiddenSize] = forget_gate;
linearGates[i_gateIndex + 2 * hiddenSize] = act_gate;
linearGates[i_gateIndex + 3 * hiddenSize] = out_gate;
linearGates[i_gateIndex + 4 * hiddenSize] = r_gate;
linearGates[i_gateIndex + 5 * hiddenSize] = lin_gate;
}
float val = (forget_gate * c_in[index]) + (in_gate * act_gate);
c_out[index] = val;
val = out_gate * tanhf(val);
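// Highway connection: r_gate mixes the LSTM output with the linear carry lin_gate.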
val = val * r_gate + (1.f - r_gate) * lin_gate;
val = val * dropout_in[index];
h_out[index] = val;
}
void highway_lstm_backward_ongpu(int inputSize, int hiddenSize, int miniBatch,
int numLayers, int seqLength, float *out_grad, int *lengths,
float *h_data_grad, float * c_data_grad, float *x, float *h_data,
float *c_data, float *T,
float *gates_out, float *dropout_in, float *h_gates_grad,
float *i_gates_grad, float *h_out_grad, float *x_grad, float *T_grad, float *bias_grad,
int isTraining, int do_weight_grad, cudaStream_t stream, cublasHandle_t handle) {
const int numElements = hiddenSize * miniBatch;
cudaStream_t stream_i;
cudaStream_t stream_h;
cudaStream_t stream_wi;
cudaStream_t stream_wh;
cudaStream_t stream_wb;
cudaErrCheck(cudaStreamCreate(&stream_i));
cudaErrCheck(cudaStreamCreate(&stream_h));
cudaErrCheck(cudaStreamCreate(&stream_wi));
cudaErrCheck(cudaStreamCreate(&stream_wh));
cudaErrCheck(cudaStreamCreate(&stream_wb));
float one = 1.f;
float zero = 0.f;
float *ones_host = new float[miniBatch];
for (int i=0; i < miniBatch; i++) {
ones_host[i] = 1.f;
}
float *ones;
cudaErrCheck(cudaMalloc((void**)&ones, miniBatch * sizeof(float)));
cudaErrCheck(cudaMemcpy(ones, ones_host, miniBatch * sizeof(float), cudaMemcpyHostToDevice));
for (int layer = numLayers-1; layer >= 0; layer--) {
int direction;
int startInd;
int currNumCovered;
if (layer % 2 == 0) {
// forward direction
direction = -1;
startInd = seqLength-1;
currNumCovered = 0;
} else {
// backward direction
direction = 1;
startInd = 0;
currNumCovered = miniBatch;
}
for (int t = startInd; t < seqLength && t >= 0; t = t + direction) {
int prevIndex;
int prevGradIndex;
if (direction == 1) {
while (lengths[currNumCovered-1] <= t) {
currNumCovered--;
}
prevGradIndex = t;
prevIndex = (t+2)%(seqLength+1);
} else {
while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) {
currNumCovered++;
}
prevGradIndex = (t+2)%(seqLength+1);
prevIndex = t;
}
float * gradPtr;
if (layer == numLayers-1) {
gradPtr = out_grad + t * numElements;
} else {
gradPtr = h_out_grad + t * numElements + layer * seqLength * numElements;
}
cublasErrCheck(cublasSetStream(handle, stream_i));
dim3 blockDim;
dim3 gridDim;
blockDim.x = BLOCK;
gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x;
elementWise_bp <<< gridDim, blockDim , 0, stream>>>
(hiddenSize, miniBatch, currNumCovered,
gradPtr,
h_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements,
c_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + (t+1) * numElements + layer * (seqLength + 1) * numElements,
h_data + (t+1) * numElements + layer * (seqLength + 1) * numElements,
gates_out + t * 6 * numElements + layer * seqLength * 6 * numElements,
dropout_in + layer * numElements,
c_data_grad + (t+1) * numElements + layer * (seqLength + 1) * numElements,
i_gates_grad,
h_gates_grad,
isTraining);
cudaErrCheck(cudaGetLastError());
// END
cudaErrCheck(cudaDeviceSynchronize());
float *out_grad_ptr;
int weightStart;
int inSize;
if (layer == 0) {
inSize = inputSize;
out_grad_ptr = x_grad + t * inputSize * miniBatch;
weightStart = 0;
} else {
inSize = hiddenSize;
out_grad_ptr = h_out_grad + t * numElements + (layer-1) * seqLength * numElements;
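// Layout of T: layer 0 holds 6H x inputSize input weights followed by 5H x H recurrent weights;
// every later layer holds 6H x H input plus 5H x H recurrent weights (11*H*H in total).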
weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize;
}
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_T, CUBLAS_OP_N,
inSize, currNumCovered, 6*hiddenSize,
&one,
&T[weightStart],
6 * hiddenSize,
i_gates_grad,
6 * hiddenSize,
&zero,
out_grad_ptr,
inSize));
cublasErrCheck(cublasSetStream(handle, stream_h));
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_T, CUBLAS_OP_N,
hiddenSize, currNumCovered, 5*hiddenSize,
&one,
&T[weightStart + 6*hiddenSize*inSize],
5 * hiddenSize,
h_gates_grad,
5 * hiddenSize,
&zero,
h_data_grad + (t+1) * numElements + layer * (seqLength+1) * numElements,
hiddenSize));
if (do_weight_grad == 1) {
float *inputPtr;
if (layer == 0) {
inputPtr = x + t * inputSize * miniBatch;
} else {
inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements;
}
cublasErrCheck(cublasSetStream(handle, stream_wi));
// Update i_weights
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_T,
6 * hiddenSize, inSize, currNumCovered,
&one,
i_gates_grad,
6 * hiddenSize,
inputPtr,
inSize,
&one,
&T_grad[weightStart],
6 * hiddenSize));
cublasErrCheck(cublasSetStream(handle, stream_wh));
// Update h_weights
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_T,
5 * hiddenSize, hiddenSize, currNumCovered,
&one,
h_gates_grad,
5 * hiddenSize,
h_data + prevIndex * numElements + layer * (seqLength+1) * numElements,
hiddenSize,
&one,
&T_grad[weightStart + 6 *hiddenSize*inSize],
5 * hiddenSize));
cublasErrCheck(cublasSetStream(handle, stream_wb));
// Update bias_weights
cublasErrCheck(cublasSgemv(handle,
CUBLAS_OP_N,
5 * hiddenSize, currNumCovered,
&one,
h_gates_grad,
5 * hiddenSize,
ones,
1,
&one,
&bias_grad[layer * 5 * hiddenSize],
1));
}
cudaErrCheck(cudaDeviceSynchronize());
}
}
cublasErrCheck(cublasSetStream(handle, stream));
cudaErrCheck(cudaStreamDestroy(stream_i));
cudaErrCheck(cudaStreamDestroy(stream_h));
cudaErrCheck(cudaStreamDestroy(stream_wi));
cudaErrCheck(cudaStreamDestroy(stream_wh));
cudaErrCheck(cudaStreamDestroy(stream_wb));
cudaErrCheck(cudaFree(ones));
delete [] ones_host;
cudaErrCheck(cudaDeviceSynchronize());
}
void highway_lstm_forward_ongpu(int inputSize, int hiddenSize, int miniBatch,
int numLayers, int seqLength, float *x, int *lengths, float *h_data,
float *c_data, float *tmp_i, float *tmp_h, float *T, float *bias,
float *dropout, float *gates, int is_training, cudaStream_t stream, cublasHandle_t handle) {
const int numElements = hiddenSize * miniBatch;
float zero = 0.f;
float one = 1.f;
cudaStream_t stream_i;
cudaStream_t stream_h;
cudaErrCheck(cudaStreamCreate(&stream_i));
cudaErrCheck(cudaStreamCreate(&stream_h));
for (int layer = 0; layer < numLayers; layer++) {
int direction;
int startInd;
int currNumCovered;
if (layer % 2 == 0) {
// forward direction
direction = 1;
startInd = 0;
currNumCovered = miniBatch;
} else {
// backward direction
direction = -1;
startInd = seqLength-1;
currNumCovered = 0;
}
cublasErrCheck(cublasSetStream(handle, stream));
for (int t = startInd; t < seqLength && t >= 0; t = t + direction) {
int prevIndex;
if (direction == 1) {
while (lengths[currNumCovered-1] <= t) {
currNumCovered--;
}
prevIndex = t;
} else {
while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) {
currNumCovered++;
}
prevIndex = (t+2)%(seqLength+1);
}
int inSize;
int weightStart;
float *inputPtr;
if (layer == 0) {
inSize = inputSize;
weightStart = 0;
inputPtr = x + t * inputSize * miniBatch;
prevIndex = t;
} else {
inSize = hiddenSize;
weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize;
inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements;
}
cublasErrCheck(cublasSetStream(handle, stream_i));
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
6*hiddenSize, currNumCovered, inSize,
&one,
&T[weightStart],
6 * hiddenSize,
inputPtr,
inSize,
&zero,
tmp_i,
6 * hiddenSize));
cublasErrCheck(cublasSetStream(handle, stream_h));
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
5*hiddenSize, currNumCovered, hiddenSize,
&one,
&T[6 * hiddenSize * inSize + weightStart],
5 * hiddenSize,
h_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
hiddenSize,
&zero,
tmp_h,
5 * hiddenSize));
cudaErrCheck(cudaDeviceSynchronize());
dim3 blockDim;
dim3 gridDim;
blockDim.x = BLOCK;
gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x;
elementWise_fp <<< gridDim, blockDim , 0, stream>>>
(hiddenSize, miniBatch, currNumCovered,
tmp_h,
tmp_i,
bias + 5 * layer * hiddenSize,
is_training ? gates + 6 * (t * numElements + layer * seqLength * numElements) : NULL,
h_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements,
dropout + layer * numElements,
c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements,
c_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements,
is_training);
cudaErrCheck(cudaGetLastError());
cudaErrCheck(cudaDeviceSynchronize());
}
}
cublasErrCheck(cublasSetStream(handle, stream));
cudaErrCheck(cudaStreamDestroy(stream_i));
cudaErrCheck(cudaStreamDestroy(stream_h));
cudaErrCheck(cudaDeviceSynchronize());
}
#ifdef __cplusplus
}
#endif
|
c984e91dfd0251e42e2b9d39b93c9668e3b3bdf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
namespace at { namespace native {
// Implement as functors since lambdas don't get optimized.
class ReduceMultiply {
public:
template <typename scalar_t>
constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const {
gpuAtomicMul(self_data, *src_data);
}
};
static ReduceMultiply reduce_multiply;
class ReduceAdd {
public:
template <typename scalar_t>
constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const {
gpuAtomicAddNoReturn(self_data, *src_data);
}
};
static ReduceAdd reduce_add;
class TensorAssign {
public:
template <typename scalar_t>
constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const {
*self_data = *src_data;
}
};
static TensorAssign tensor_assign;
// The kernels are implemented on an opaque,
// self-aligned type of the correct size,
// to avoid redundant kernels for different types
// of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
// essentially a rewrite of the relevant legacy::launch_kernel parts
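// nt = threads per block, vt = elements processed per thread; each block therefore covers nt*vt consecutive indices.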
template <int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, vt)
__global__ void _scatter_gather_elementwise_kernel(int N, func_t f) {
constexpr int nv = nt * vt;
int idx = nv * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < vt; ++i) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template <int nt, int vt, typename func_t>
static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
const dim3 block(nt);
const dim3 grid((N + block.x * vt - 1) / (block.x * vt));
const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( _scatter_gather_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <bool is_scatter_like, typename scalar_t>
struct _cuda_scatter_gather_internal_kernel {
template <typename func_t>
void operator() (
TensorIterator& iter,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()(
sub_iter, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* src_ptr = (char*)iter.data_ptr(1);
char* index_ptr = (char*)iter.data_ptr(2);
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds");
char* self_data = self_ptr + offsets[0];
char* src_data = src_ptr + offsets[1];
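// For scatter-like ops the index offsets the destination (self) along dim; for gather it offsets the source (src).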
f(
(scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0),
(scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride)
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_gather_internal_kernel
template <bool is_scatter_like = true, bool cast_to_opaque = true>
struct cuda_scatter_gather_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, const Tensor& src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
at::assert_no_internal_overlap(self);
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index, src);
if (is_scatter_like) {
scatter_shape_check(self, dim, index, src);
}
else {
gather_shape_check(self, dim, index, src);
}
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
auto self_strides = ensure_nonempty_vec(self.strides().vec());
auto src_strides = ensure_nonempty_vec(src.strides().vec());
// restride self and src such that
// self.shape = src.shape = index.shape
//
// restride stride[dim] such that
// if (is_scatter_like) self.stride[dim] = 0
// else src.stride[dim] = 0
auto self_restrided = is_scatter_like ?
restride_dim(self, dim, index_sizes)
: self.as_strided(index_sizes, self_strides);
auto src_restrided = is_scatter_like ?
src.as_strided(index_sizes, src_strides)
: restride_dim(src, dim, index_sizes);
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self_restrided)
.add_input(src_restrided)
.add_input(index)
.build();
auto self_dim_stride = ensure_nonempty_stride(self, dim);
auto self_dim_size = ensure_nonempty_size(self, dim);
auto src_dim_stride = ensure_nonempty_stride(src, dim);
auto src_dim_size = ensure_nonempty_size(src, dim);
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"cuda_scatter_gather_base_kernel_func", [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
_cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()(
iter, index_size, index_stride, f
);
}
);
}
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, const Tensor& src,
const std::string& method_name,
const ReduceMultiply& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
at::assert_no_internal_overlap(self);
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index, src);
if (is_scatter_like) {
scatter_shape_check(self, dim, index, src);
}
else {
gather_shape_check(self, dim, index, src);
}
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
auto self_strides = ensure_nonempty_vec(self.strides().vec());
auto src_strides = ensure_nonempty_vec(src.strides().vec());
// restride self and src such that
// self.shape = src.shape = index.shape
//
// restride stride[dim] such that
// if (is_scatter_like) self.stride[dim] = 0
// else src.stride[dim] = 0
auto self_restrided = is_scatter_like ?
restride_dim(self, dim, index_sizes)
: self.as_strided(index_sizes, self_strides);
auto src_restrided = is_scatter_like ?
src.as_strided(index_sizes, src_strides)
: restride_dim(src, dim, index_sizes);
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self_restrided)
.add_input(src_restrided)
.add_input(index)
.build();
auto self_dim_stride = ensure_nonempty_stride(self, dim);
auto self_dim_size = ensure_nonempty_size(self, dim);
auto src_dim_stride = ensure_nonempty_stride(src, dim);
auto src_dim_size = ensure_nonempty_size(src, dim);
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(),
"cuda_scatter_gather_base_kernel_reduce_multiply", [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
_cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()(
iter, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_gather_base_kernel
template <typename scalar_t>
struct _cuda_scatter_fill_internal_kernel {
template <typename func_t>
void operator()(
TensorIterator& iter,
scalar_t src_val,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_fill_internal_kernel<scalar_t>()(
sub_iter, src_val, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* index_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds"
);
char* self_data = self_ptr + offsets[0];
f(
(scalar_t*)self_data + idx_dim * index_stride,
(scalar_t*)&src_val
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_fill_internal_kernel
template <bool cast_to_opaque = true>
struct cuda_scatter_fill_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, Scalar src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
at::assert_no_internal_overlap(self);
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index);
scatter_shape_check(self, dim, index);
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
// restride self such that
// self.shape = index.shape and
// self.stride[dim] = 0
auto self_restrided = restride_dim(self, dim, index_sizes);
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self_restrided)
.add_input(index)
.build();
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"cuda_scatter_fill_base_kernel_func", [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
auto src_scalar_val = src.to<scalar_t>();
auto src_val = *(dtype*)&src_scalar_val;
_cuda_scatter_fill_internal_kernel<dtype>()(
iter, src_val, index_size, index_stride, f
);
}
);
}
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, Scalar src,
const std::string& method_name,
const ReduceMultiply& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
at::assert_no_internal_overlap(self);
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index);
scatter_shape_check(self, dim, index);
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
// restride self such that
// self.shape = index.shape and
// self.stride[dim] = 0
auto self_restrided = restride_dim(self, dim, index_sizes);
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self_restrided)
.add_input(index)
.build();
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(),
"cuda_scatter_fill_base_kernel_reduce_multiply", [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
auto src_scalar_val = src.to<scalar_t>();
auto src_val = *(dtype*)&src_scalar_val;
_cuda_scatter_fill_internal_kernel<dtype>()(
iter, src_val, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_fill_base_kernel
void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()(
result, dim, index, self,
"gather_out_cuda", tensor_assign);
}
void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel<>()(
self, dim, index, src,
"scatter_cuda_", tensor_assign);
}
void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Scalar& src) {
cuda_scatter_fill_base_kernel<>()(
self, dim, index, src,
"scatter_fill_cuda_", tensor_assign);
}
void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("scatter_add_cuda_kernel");
cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()(
self, dim, index, src,
"scatter_add_cuda_", reduce_add);
}
void scatter_reduce_cuda_kernel(Tensor& self, const int64_t dim, const Tensor& index,
const Tensor& src, const SCATTER_GATHER_OP& reduce) {
switch (reduce) {
case SCATTER_GATHER_OP::REDUCE_ADD :
cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src,
"scatter_reduce_cuda_add_", reduce_add);
break;
case SCATTER_GATHER_OP::REDUCE_MULTIPLY :
cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src,
"scatter_reduce_cuda_multiply_", reduce_multiply);
break;
}
}
void scatter_scalar_reduce_cuda_kernel(Tensor& self, const int64_t dim, const Tensor& index,
const Scalar& value, const SCATTER_GATHER_OP& reduce) {
switch (reduce) {
case SCATTER_GATHER_OP::REDUCE_ADD :
cuda_scatter_fill_base_kernel<false>()(self, dim, index, value,
"scatter_fill_cuda_add_", reduce_add);
break;
case SCATTER_GATHER_OP::REDUCE_MULTIPLY :
cuda_scatter_fill_base_kernel<false>()(self, dim, index, value,
"scatter_fill_cuda_multiply_", reduce_multiply);
break;
}
}
REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel);
REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel);
REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel);
REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel);
REGISTER_DISPATCH(scatter_reduce_stub, &scatter_reduce_cuda_kernel);
REGISTER_DISPATCH(scatter_scalar_reduce_stub, &scatter_scalar_reduce_cuda_kernel);
}} // namespace at::native
| c984e91dfd0251e42e2b9d39b93c9668e3b3bdf3.cu | #include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
namespace at { namespace native {
// Implement as functors since lambdas don't get optimized.
class ReduceMultiply {
public:
template <typename scalar_t>
constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const {
gpuAtomicMul(self_data, *src_data);
}
};
static ReduceMultiply reduce_multiply;
class ReduceAdd {
public:
template <typename scalar_t>
constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const {
gpuAtomicAddNoReturn(self_data, *src_data);
}
};
static ReduceAdd reduce_add;
class TensorAssign {
public:
template <typename scalar_t>
constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const {
*self_data = *src_data;
}
};
static TensorAssign tensor_assign;
// The kernels are implemented on an opaque,
// self-aligned type of the correct size,
// to avoid redundant kernels for different types
// of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
// essentially a rewrite of the relevant legacy::launch_kernel parts
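// nt = threads per block, vt = elements processed per thread; each block therefore covers nt*vt consecutive indices.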
template <int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, vt)
__global__ void _scatter_gather_elementwise_kernel(int N, func_t f) {
constexpr int nv = nt * vt;
int idx = nv * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < vt; ++i) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template <int nt, int vt, typename func_t>
static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
const dim3 block(nt);
const dim3 grid((N + block.x * vt - 1) / (block.x * vt));
const auto stream = at::cuda::getCurrentCUDAStream();
_scatter_gather_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <bool is_scatter_like, typename scalar_t>
struct _cuda_scatter_gather_internal_kernel {
template <typename func_t>
void operator() (
TensorIterator& iter,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()(
sub_iter, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* src_ptr = (char*)iter.data_ptr(1);
char* index_ptr = (char*)iter.data_ptr(2);
auto offset_calc = make_offset_calculator<3>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds");
char* self_data = self_ptr + offsets[0];
char* src_data = src_ptr + offsets[1];
f(
(scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0),
(scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride)
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_gather_internal_kernel
template <bool is_scatter_like = true, bool cast_to_opaque = true>
struct cuda_scatter_gather_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, const Tensor& src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
at::assert_no_internal_overlap(self);
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index, src);
if (is_scatter_like) {
scatter_shape_check(self, dim, index, src);
}
else {
gather_shape_check(self, dim, index, src);
}
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
auto self_strides = ensure_nonempty_vec(self.strides().vec());
auto src_strides = ensure_nonempty_vec(src.strides().vec());
// restride self and src such that
// self.shape = src.shape = index.shape
//
// restride stride[dim] such that
// if (is_scatter_like) self.stride[dim] = 0
// else src.stride[dim] = 0
auto self_restrided = is_scatter_like ?
restride_dim(self, dim, index_sizes)
: self.as_strided(index_sizes, self_strides);
auto src_restrided = is_scatter_like ?
src.as_strided(index_sizes, src_strides)
: restride_dim(src, dim, index_sizes);
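    // Worked example (shapes assumed for illustration): scattering along dim=1
    // with self of shape [4, 5] and index/src of shape [4, 3], self is viewed
    // with sizes [4, 3] and stride 0 along dim, so the iterator walks
    // index.shape and the real offset along dim is applied in the kernel as
    // idx_dim * index_stride, where index_stride is self's original stride(1).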
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self_restrided)
.add_input(src_restrided)
.add_input(index)
.build();
auto self_dim_stride = ensure_nonempty_stride(self, dim);
auto self_dim_size = ensure_nonempty_size(self, dim);
auto src_dim_stride = ensure_nonempty_stride(src, dim);
auto src_dim_size = ensure_nonempty_size(src, dim);
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"cuda_scatter_gather_base_kernel_func", [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
_cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()(
iter, index_size, index_stride, f
);
}
);
}
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, const Tensor& src,
const std::string& method_name,
const ReduceMultiply& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
at::assert_no_internal_overlap(self);
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index, src);
if (is_scatter_like) {
scatter_shape_check(self, dim, index, src);
}
else {
gather_shape_check(self, dim, index, src);
}
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
auto self_strides = ensure_nonempty_vec(self.strides().vec());
auto src_strides = ensure_nonempty_vec(src.strides().vec());
// restride self and src such that
// self.shape = src.shape = index.shape
//
// restride stride[dim] such that
// if (is_scatter_like) self.stride[dim] = 0
// else src.stride[dim] = 0
auto self_restrided = is_scatter_like ?
restride_dim(self, dim, index_sizes)
: self.as_strided(index_sizes, self_strides);
auto src_restrided = is_scatter_like ?
src.as_strided(index_sizes, src_strides)
: restride_dim(src, dim, index_sizes);
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self_restrided)
.add_input(src_restrided)
.add_input(index)
.build();
auto self_dim_stride = ensure_nonempty_stride(self, dim);
auto self_dim_size = ensure_nonempty_size(self, dim);
auto src_dim_stride = ensure_nonempty_stride(src, dim);
auto src_dim_size = ensure_nonempty_size(src, dim);
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(),
"cuda_scatter_gather_base_kernel_reduce_multiply", [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
_cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()(
iter, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_gather_base_kernel
template <typename scalar_t>
struct _cuda_scatter_fill_internal_kernel {
template <typename func_t>
void operator()(
TensorIterator& iter,
scalar_t src_val,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_fill_internal_kernel<scalar_t>()(
sub_iter, src_val, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* index_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds"
);
char* self_data = self_ptr + offsets[0];
f(
(scalar_t*)self_data + idx_dim * index_stride,
(scalar_t*)&src_val
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_fill_internal_kernel
template <bool cast_to_opaque = true>
struct cuda_scatter_fill_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, Scalar src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
at::assert_no_internal_overlap(self);
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index);
scatter_shape_check(self, dim, index);
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
// restride self such that
// self.shape = index.shape and
// self.stride[dim] = 0
auto self_restrided = restride_dim(self, dim, index_sizes);
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self_restrided)
.add_input(index)
.build();
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"cuda_scatter_fill_base_kernel_func", [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
auto src_scalar_val = src.to<scalar_t>();
auto src_val = *(dtype*)&src_scalar_val;
_cuda_scatter_fill_internal_kernel<dtype>()(
iter, src_val, index_size, index_stride, f
);
}
);
}
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, Scalar src,
const std::string& method_name,
const ReduceMultiply& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
at::assert_no_internal_overlap(self);
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index);
scatter_shape_check(self, dim, index);
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
// restride self such that
// self.shape = index.shape and
// self.stride[dim] = 0
auto self_restrided = restride_dim(self, dim, index_sizes);
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self_restrided)
.add_input(index)
.build();
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(),
"cuda_scatter_fill_base_kernel_reduce_multiply", [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
auto src_scalar_val = src.to<scalar_t>();
auto src_val = *(dtype*)&src_scalar_val;
_cuda_scatter_fill_internal_kernel<dtype>()(
iter, src_val, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_fill_base_kernel
void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()(
result, dim, index, self,
"gather_out_cuda", tensor_assign);
}
void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel<>()(
self, dim, index, src,
"scatter_cuda_", tensor_assign);
}
void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Scalar& src) {
cuda_scatter_fill_base_kernel<>()(
self, dim, index, src,
"scatter_fill_cuda_", tensor_assign);
}
void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("scatter_add_cuda_kernel");
cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()(
self, dim, index, src,
"scatter_add_cuda_", reduce_add);
}
void scatter_reduce_cuda_kernel(Tensor& self, const int64_t dim, const Tensor& index,
const Tensor& src, const SCATTER_GATHER_OP& reduce) {
switch (reduce) {
case SCATTER_GATHER_OP::REDUCE_ADD :
cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src,
"scatter_reduce_cuda_add_", reduce_add);
break;
case SCATTER_GATHER_OP::REDUCE_MULTIPLY :
cuda_scatter_gather_base_kernel<true, false>()(self, dim, index, src,
"scatter_reduce_cuda_multiply_", reduce_multiply);
break;
}
}
void scatter_scalar_reduce_cuda_kernel(Tensor& self, const int64_t dim, const Tensor& index,
const Scalar& value, const SCATTER_GATHER_OP& reduce) {
switch (reduce) {
case SCATTER_GATHER_OP::REDUCE_ADD :
cuda_scatter_fill_base_kernel<false>()(self, dim, index, value,
"scatter_fill_cuda_add_", reduce_add);
break;
case SCATTER_GATHER_OP::REDUCE_MULTIPLY :
cuda_scatter_fill_base_kernel<false>()(self, dim, index, value,
"scatter_fill_cuda_multiply_", reduce_multiply);
break;
}
}
REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel);
REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel);
REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel);
REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel);
REGISTER_DISPATCH(scatter_reduce_stub, &scatter_reduce_cuda_kernel);
REGISTER_DISPATCH(scatter_scalar_reduce_stub, &scatter_scalar_reduce_cuda_kernel);
}} // namespace at::native
|
91694208f1ff3ad23da5f81de20b1796e5755e14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from qmr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zqmr_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *v,
magmaDoubleComplex *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex ytmp = y[ i+j*num_rows ] / rho;
y[ i+j*num_rows ] = ytmp;
v[ i+j*num_rows ] = ytmp;
magmaDoubleComplex ztmp = z[ i+j*num_rows ] / psi;
z[ i+j*num_rows ] = ztmp;
w[ i+j*num_rows ] = ztmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    v = y / rho
    y = y / rho
    w = z / psi
z = z / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
rho magmaDoubleComplex
scalar
@param[in]
psi magmaDoubleComplex
scalar
@param[in,out]
y magmaDoubleComplex_ptr
vector
@param[in,out]
z magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr w,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zqmr_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, rho, psi,
y, z, v, w );
return MAGMA_SUCCESS;
}
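/*
    Usage sketch (illustrative; array names are assumptions, not MAGMA API):
    inside a QMR iteration with m x n device arrays d_y, d_z, d_v, d_w and
    scalars rho, psi, a single merged launch
        magma_zqmr_1( m, n, rho, psi, d_y, d_z, d_v, d_w, queue );
    replaces four separate vector updates and their kernel-launch overhead.
*/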
__global__ void
magma_zqmr_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex pde,
magmaDoubleComplex rde,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr q )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
p[ i+j*num_rows ] = y[ i+j*num_rows ] - pde * p[ i+j*num_rows ];
q[ i+j*num_rows ] = z[ i+j*num_rows ] - rde * q[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
p = y - pde * p
q = z - rde * q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
pde magmaDoubleComplex
scalar
@param[in]
rde magmaDoubleComplex
scalar
@param[in]
y magmaDoubleComplex_ptr
vector
@param[in]
z magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in,out]
q magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex pde,
magmaDoubleComplex rde,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr q,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zqmr_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, pde, rde, y, z, p, q );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_3_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex *pt,
magmaDoubleComplex *v,
magmaDoubleComplex *y )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ];
v[ i+j*num_rows ] = tmp;
y[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
v = pt - beta * v
y = v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
pt magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
y magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr pt,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr y,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zqmr_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, pt, v, y );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_4_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex eta,
magmaDoubleComplex *p,
magmaDoubleComplex *pt,
magmaDoubleComplex *d,
magmaDoubleComplex *s,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmpd = eta * p[ i+j*num_rows ];
d[ i+j*num_rows ] = tmpd;
x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd;
magmaDoubleComplex tmps = eta * pt[ i+j*num_rows ];
s[ i+j*num_rows ] = tmps;
r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
d = eta * p;
s = eta * pt;
x = x + d;
r = r - s;
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta magmaDoubleComplex
scalar
@param[in]
p magmaDoubleComplex_ptr
vector
@param[in]
pt magmaDoubleComplex_ptr
vector
@param[in,out]
d magmaDoubleComplex_ptr
vector
@param[in,out]
s magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex eta,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr pt,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zqmr_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, p, pt, d, s, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_5_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex eta,
magmaDoubleComplex pds,
magmaDoubleComplex *p,
magmaDoubleComplex *pt,
magmaDoubleComplex *d,
magmaDoubleComplex *s,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmpd = eta * p[ i+j*num_rows ] + pds * d[ i+j*num_rows ];
d[ i+j*num_rows ] = tmpd;
x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd;
magmaDoubleComplex tmps = eta * pt[ i+j*num_rows ] + pds * s[ i+j*num_rows ];
s[ i+j*num_rows ] = tmps;
r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
d = eta * p + pds * d;
s = eta * pt + pds * s;
x = x + d;
r = r - s;
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta magmaDoubleComplex
scalar
@param[in]
pds magmaDoubleComplex
scalar
@param[in]
p magmaDoubleComplex_ptr
vector
@param[in]
pt magmaDoubleComplex_ptr
vector
@param[in,out]
d magmaDoubleComplex_ptr
vector
@param[in,out]
s magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_5(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex eta,
magmaDoubleComplex pds,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr pt,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zqmr_5_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, pds, p, pt, d, s, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_6_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *v,
magmaDoubleComplex *w,
magmaDoubleComplex *wt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex wttmp = wt[ i+j*num_rows ]
- MAGMA_Z_CONJ( beta ) * w[ i+j*num_rows ];
wt[ i+j*num_rows ] = wttmp;
magmaDoubleComplex ztmp = wttmp / psi;
z[ i+j*num_rows ] = ztmp;
w[ i+j*num_rows ] = ztmp;
magmaDoubleComplex ytmp = y[ i+j*num_rows ] / rho;
y[ i+j*num_rows ] = ytmp;
v[ i+j*num_rows ] = ytmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
wt = wt - conj(beta) * w
v = y / rho
y = y / rho
w = wt / psi
z = wt / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
rho magmaDoubleComplex
scalar
@param[in]
psi magmaDoubleComplex
scalar
@param[in,out]
y magmaDoubleComplex_ptr
vector
@param[in,out]
z magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in,out]
wt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_6(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr w,
magmaDoubleComplex_ptr wt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zqmr_6_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, rho, psi,
y, z, v, w, wt );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_7_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex *pt,
magmaDoubleComplex *v,
magmaDoubleComplex *vt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ];
vt[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
vt = pt - beta * v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
pt magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
vt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_7(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr pt,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr vt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zqmr_7_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, pt, v, vt );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_8_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex *vt,
magmaDoubleComplex *wt,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *v,
magmaDoubleComplex *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
y[ i+j*num_rows ] = y[ i+j*num_rows ] / rho;
v[ i+j*num_rows ] = vt[ i+j*num_rows ] / rho;
z[ i+j*num_rows ] = z[ i+j*num_rows ] / psi;
w[ i+j*num_rows ] = wt[ i+j*num_rows ] / psi;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    v = vt / rho
y = y / rho
w = wt / psi
z = z / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
rho magmaDoubleComplex
scalar
@param[in]
psi magmaDoubleComplex
scalar
@param[in]
vt magmaDoubleComplex_ptr
vector
@param[in]
wt magmaDoubleComplex_ptr
vector
@param[in,out]
y magmaDoubleComplex_ptr
vector
@param[in,out]
z magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_8(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex_ptr vt,
magmaDoubleComplex_ptr wt,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr w,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zqmr_8_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, rho, psi,
vt, wt, y, z, v, w );
return MAGMA_SUCCESS;
}
| 91694208f1ff3ad23da5f81de20b1796e5755e14.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from qmr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zqmr_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *v,
magmaDoubleComplex *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex ytmp = y[ i+j*num_rows ] / rho;
y[ i+j*num_rows ] = ytmp;
v[ i+j*num_rows ] = ytmp;
magmaDoubleComplex ztmp = z[ i+j*num_rows ] / psi;
z[ i+j*num_rows ] = ztmp;
w[ i+j*num_rows ] = ztmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    v = y / rho
    y = y / rho
    w = z / psi
z = z / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
rho magmaDoubleComplex
scalar
@param[in]
psi magmaDoubleComplex
scalar
@param[in,out]
y magmaDoubleComplex_ptr
vector
@param[in,out]
z magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr w,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zqmr_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, rho, psi,
y, z, v, w );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex pde,
magmaDoubleComplex rde,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr q )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
p[ i+j*num_rows ] = y[ i+j*num_rows ] - pde * p[ i+j*num_rows ];
q[ i+j*num_rows ] = z[ i+j*num_rows ] - rde * q[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
p = y - pde * p
q = z - rde * q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
pde magmaDoubleComplex
scalar
@param[in]
rde magmaDoubleComplex
scalar
@param[in]
y magmaDoubleComplex_ptr
vector
@param[in]
z magmaDoubleComplex_ptr
vector
@param[in,out]
p magmaDoubleComplex_ptr
vector
@param[in,out]
q magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex pde,
magmaDoubleComplex rde,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr q,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zqmr_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, pde, rde, y, z, p, q );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_3_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex *pt,
magmaDoubleComplex *v,
magmaDoubleComplex *y )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ];
v[ i+j*num_rows ] = tmp;
y[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
v = pt - beta * v
y = v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
pt magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
y magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr pt,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr y,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zqmr_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, pt, v, y );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_4_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex eta,
magmaDoubleComplex *p,
magmaDoubleComplex *pt,
magmaDoubleComplex *d,
magmaDoubleComplex *s,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmpd = eta * p[ i+j*num_rows ];
d[ i+j*num_rows ] = tmpd;
x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd;
magmaDoubleComplex tmps = eta * pt[ i+j*num_rows ];
s[ i+j*num_rows ] = tmps;
r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
d = eta * p;
s = eta * pt;
x = x + d;
r = r - s;
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta magmaDoubleComplex
scalar
@param[in]
p magmaDoubleComplex_ptr
vector
@param[in]
pt magmaDoubleComplex_ptr
vector
@param[in,out]
d magmaDoubleComplex_ptr
vector
@param[in,out]
s magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex eta,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr pt,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zqmr_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, p, pt, d, s, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_5_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex eta,
magmaDoubleComplex pds,
magmaDoubleComplex *p,
magmaDoubleComplex *pt,
magmaDoubleComplex *d,
magmaDoubleComplex *s,
magmaDoubleComplex *x,
magmaDoubleComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmpd = eta * p[ i+j*num_rows ] + pds * d[ i+j*num_rows ];
d[ i+j*num_rows ] = tmpd;
x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd;
magmaDoubleComplex tmps = eta * pt[ i+j*num_rows ] + pds * s[ i+j*num_rows ];
s[ i+j*num_rows ] = tmps;
r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
d = eta * p + pds * d;
s = eta * pt + pds * s;
x = x + d;
r = r - s;
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta magmaDoubleComplex
scalar
@param[in]
pds magmaDoubleComplex
scalar
@param[in]
p magmaDoubleComplex_ptr
vector
@param[in]
pt magmaDoubleComplex_ptr
vector
@param[in,out]
d magmaDoubleComplex_ptr
vector
@param[in,out]
s magmaDoubleComplex_ptr
vector
@param[in,out]
x magmaDoubleComplex_ptr
vector
@param[in,out]
r magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_5(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex eta,
magmaDoubleComplex pds,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr pt,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zqmr_5_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, pds, p, pt, d, s, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_6_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *v,
magmaDoubleComplex *w,
magmaDoubleComplex *wt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex wttmp = wt[ i+j*num_rows ]
- MAGMA_Z_CONJ( beta ) * w[ i+j*num_rows ];
wt[ i+j*num_rows ] = wttmp;
magmaDoubleComplex ztmp = wttmp / psi;
z[ i+j*num_rows ] = ztmp;
w[ i+j*num_rows ] = ztmp;
magmaDoubleComplex ytmp = y[ i+j*num_rows ] / rho;
y[ i+j*num_rows ] = ytmp;
v[ i+j*num_rows ] = ytmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
wt = wt - conj(beta) * w
v = y / rho
y = y / rho
w = wt / psi
z = wt / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
rho magmaDoubleComplex
scalar
@param[in]
psi magmaDoubleComplex
scalar
@param[in,out]
y magmaDoubleComplex_ptr
vector
@param[in,out]
z magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in,out]
wt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_6(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr w,
magmaDoubleComplex_ptr wt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zqmr_6_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, rho, psi,
y, z, v, w, wt );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_7_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex *pt,
magmaDoubleComplex *v,
magmaDoubleComplex *vt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaDoubleComplex tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ];
vt[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
vt = pt - beta * v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaDoubleComplex
scalar
@param[in]
pt magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
vt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_7(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr pt,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr vt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zqmr_7_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, pt, v, vt );
return MAGMA_SUCCESS;
}
__global__ void
magma_zqmr_8_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex *vt,
magmaDoubleComplex *wt,
magmaDoubleComplex *y,
magmaDoubleComplex *z,
magmaDoubleComplex *v,
magmaDoubleComplex *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
y[ i+j*num_rows ] = y[ i+j*num_rows ] / rho;
v[ i+j*num_rows ] = vt[ i+j*num_rows ] / rho;
z[ i+j*num_rows ] = z[ i+j*num_rows ] / psi;
w[ i+j*num_rows ] = wt[ i+j*num_rows ] / psi;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    v = vt / rho
y = y / rho
w = wt / psi
z = z / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
rho magmaDoubleComplex
scalar
@param[in]
psi magmaDoubleComplex
scalar
@param[in]
vt magmaDoubleComplex_ptr
vector
@param[in]
wt magmaDoubleComplex_ptr
vector
@param[in,out]
y magmaDoubleComplex_ptr
vector
@param[in,out]
z magmaDoubleComplex_ptr
vector
@param[in,out]
v magmaDoubleComplex_ptr
vector
@param[in,out]
w magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zqmr_8(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex rho,
magmaDoubleComplex psi,
magmaDoubleComplex_ptr vt,
magmaDoubleComplex_ptr wt,
magmaDoubleComplex_ptr y,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr w,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zqmr_8_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, rho, psi,
vt, wt, y, z, v, w );
return MAGMA_SUCCESS;
}
|
07f47142e9843752e35975ad50522ee898004a37.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2011 Andreas Muetzel ([email protected]). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#include "kdtree_cuda_3d_index.h"
#include <flann/algorithms/dist.h>
#include <flann/util/cuda/result_set.h>
// #define THRUST_DEBUG 1
#include <hip/hip_runtime.h>
#include <thrust/gather.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <hip/hip_vector_types.h>
#include <flann/util/cutil_math.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <flann/util/cuda/heap.h>
#include <thrust/scan.h>
#include <thrust/count.h>
#include <thrust/gather.h>
#include <flann/algorithms/kdtree_cuda_builder.h>
#include <hip/hip_vector_types.h>
namespace flann
{
namespace KdTreeCudaPrivate
{
template< typename GPUResultSet, typename Distance >
__device__
void searchNeighbors(const cuda::kd_tree_builder_detail::SplitInfo* splits,
const int* child1,
const int* parent,
const float4* aabbLow,
const float4* aabbHigh, const float4* elements, const float4& q, GPUResultSet& result, const Distance& distance = Distance() )
{
bool backtrack=false;
int lastNode=-1;
int current=0;
cuda::kd_tree_builder_detail::SplitInfo split;
while(true) {
if( current==-1 ) break;
split = splits[current];
float diff1;
if( split.split_dim==0 ) diff1=q.x- split.split_val;
else if( split.split_dim==1 ) diff1=q.y- split.split_val;
else if( split.split_dim==2 ) diff1=q.z- split.split_val;
// children are next to each other: leftChild+1 == rightChild
int leftChild= child1[current];
int bestChild=leftChild;
int otherChild=leftChild;
if ((diff1)<0) {
otherChild++;
}
else {
bestChild++;
}
if( !backtrack ) {
/* If this is a leaf node, then do check and return. */
if (leftChild==-1) {
for (int i=split.left; i<split.right; ++i) {
float dist=distance.dist(elements[i],q);
result.insert(i,dist);
}
backtrack=true;
lastNode=current;
current=parent[current];
}
else { // go to closer child node
lastNode=current;
current=bestChild;
}
}
else { // continue moving back up the tree or visit far node?
// minimum possible distance between query point and a point inside the AABB
float mindistsq=0;
float4 aabbMin=aabbLow[otherChild];
float4 aabbMax=aabbHigh[otherChild];
if( q.x < aabbMin.x ) mindistsq+=distance.axisDist(q.x, aabbMin.x);
else if( q.x > aabbMax.x ) mindistsq+=distance.axisDist(q.x, aabbMax.x);
if( q.y < aabbMin.y ) mindistsq+=distance.axisDist(q.y, aabbMin.y);
else if( q.y > aabbMax.y ) mindistsq+=distance.axisDist(q.y, aabbMax.y);
if( q.z < aabbMin.z ) mindistsq+=distance.axisDist(q.z, aabbMin.z);
else if( q.z > aabbMax.z ) mindistsq+=distance.axisDist(q.z, aabbMax.z);
// the far node was NOT the last node (== not visited yet) AND there could be a closer point in it
if(( lastNode==bestChild) && (mindistsq <= result.worstDist() ) ) {
lastNode=current;
current=otherChild;
backtrack=false;
}
else {
lastNode=current;
current=parent[current];
}
}
}
}
template< typename GPUResultSet, typename Distance >
__global__
void nearestKernel(const cuda::kd_tree_builder_detail::SplitInfo* splits,
const int* child1,
const int* parent,
const float4* aabbMin,
const float4* aabbMax, const float4* elements, const float* query, int stride, int resultStride, int* resultIndex, float* resultDist, int querysize, GPUResultSet result, Distance dist = Distance())
{
typedef float DistanceType;
typedef float ElementType;
// typedef DistanceType float;
size_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if( tid >= querysize ) return;
float4 q = make_float4(query[tid*stride],query[tid*stride+1],query[tid*stride+2],0);
result.setResultLocation( resultDist, resultIndex, tid, resultStride );
searchNeighbors(splits,child1,parent,aabbMin,aabbMax,elements, q, result, dist);
result.finish();
}
}
//! contains some pointers that use cuda data types and that cannot be easily
//! forward-declared.
//! basically it contains all GPU buffers
template<typename Distance>
struct KDTreeCuda3dIndex<Distance>::GpuHelper
{
thrust::device_vector< cuda::kd_tree_builder_detail::SplitInfo >* gpu_splits_;
thrust::device_vector< int >* gpu_parent_;
thrust::device_vector< int >* gpu_child1_;
thrust::device_vector< float4 >* gpu_aabb_min_;
thrust::device_vector< float4 >* gpu_aabb_max_;
thrust::device_vector<float4>* gpu_points_;
thrust::device_vector<int>* gpu_vind_;
GpuHelper() : gpu_splits_(0), gpu_parent_(0), gpu_child1_(0), gpu_aabb_min_(0), gpu_aabb_max_(0), gpu_points_(0), gpu_vind_(0){
}
~GpuHelper()
{
delete gpu_splits_;
gpu_splits_=0;
delete gpu_parent_;
gpu_parent_=0;
delete gpu_child1_;
gpu_child1_=0;
delete gpu_aabb_max_;
gpu_aabb_max_=0;
delete gpu_aabb_min_;
gpu_aabb_min_=0;
delete gpu_vind_;
gpu_vind_=0;
delete gpu_points_;
gpu_points_=0;
}
};
//! thrust transform functor
//! transforms indices in the internal data set back to the original indices
struct map_indices
{
const int* v_;
map_indices(const int* v) : v_(v) {
}
__host__ __device__
float operator() (const int&i) const
{
if( i>= 0 ) return v_[i];
else return i;
}
};
//! implementation of L2 distance for the CUDA kernels
struct CudaL2
{
static float
__host__ __device__
axisDist( float a, float b )
{
return (a-b)*(a-b);
}
static float
__host__ __device__
dist( float4 a, float4 b )
{
float4 diff = a-b;
return dot(diff,diff);
}
};
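// Note that CudaL2::dist returns the *squared* euclidean distance (dot(diff,diff)),
// which is why searchNeighbors above compares the squared AABB distance mindistsq
// directly against result.worstDist() without taking square roots.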
//! implementation of L1 distance for the CUDA kernels
//! NOT TESTED!
struct CudaL1
{
static float
__host__ __device__
axisDist( float a, float b )
{
return fabs(a-b);
}
static float
__host__ __device__
dist( float4 a, float4 b )
{
        // L1 distance: sum of absolute per-component differences
        return fabs(a.x-b.x)+fabs(a.y-b.y)+fabs(a.z-b.z)+fabs(a.w-b.w);
}
};
//! used to adapt CPU and GPU distance types.
//! specializations define the ::type as their corresponding GPU distance type
//! \see GpuDistance< L2<float> >, GpuDistance< L2_Simple<float> >
template< class Distance >
struct GpuDistance
{
};
template<>
struct GpuDistance< L2<float> >
{
typedef CudaL2 type;
};
template<>
struct GpuDistance< L2_Simple<float> >
{
typedef CudaL2 type;
};
template<>
struct GpuDistance< L1<float> >
{
typedef CudaL1 type;
};
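// For example, typename GpuDistance< L2<float> >::type resolves to CudaL2; the
// search methods below instantiate it as `distance` and hand it to nearestKernel.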
template< typename Distance >
void KDTreeCuda3dIndex<Distance>::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const
{
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(int(indices.cols) >= knn);
assert( dists.cols == indices.cols && dists.stride==indices.stride );
int istride=queries.stride/sizeof(ElementType);
int ostride=indices.stride/4;
bool matrices_on_gpu = params.matrices_in_gpu_ram;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
float epsError = 1+params.eps;
bool sorted = params.sorted;
bool use_heap = params.use_heap;
typename GpuDistance<Distance>::type distance;
// std::cout<<" search: "<<std::endl;
// std::cout<<" rows: "<<indices.rows<<" "<<dists.rows<<" "<<queries.rows<<std::endl;
// std::cout<<" cols: "<<indices.cols<<" "<<dists.cols<<" "<<queries.cols<<std::endl;
// std::cout<<" stride: "<<indices.stride<<" "<<dists.stride<<" "<<queries.stride<<std::endl;
// std::cout<<" stride2:"<<istride<<" "<<ostride<<std::endl;
// std::cout<<" knn:"<<knn<<" matrices_on_gpu:"<<matrices_on_gpu<<std::endl;
if( !matrices_on_gpu ) {
thrust::device_vector<float> queriesDev(istride* queries.rows,0);
thrust::copy( queries.ptr(), queries.ptr()+istride*queries.rows, queriesDev.begin() );
thrust::device_vector<float> distsDev(queries.rows* ostride);
thrust::device_vector<int> indicesDev(queries.rows* ostride);
if( knn==1 ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::SingleResultSet<float>(epsError),distance);
// hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_nodes_)[0])),
// thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
// thrust::raw_pointer_cast(&queriesDev[0]),
// queries.stride,
// thrust::raw_pointer_cast(&indicesDev[0]),
// thrust::raw_pointer_cast(&distsDev[0]),
// queries.rows, epsError);
//
}
else {
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnResultSet<float, true>(knn,sorted,epsError)
, distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnResultSet<float, false>(knn,sorted,epsError),
distance
);
}
}
thrust::copy( distsDev.begin(), distsDev.end(), dists.ptr() );
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
}
else {
thrust::device_ptr<float> qd = thrust::device_pointer_cast(queries.ptr());
thrust::device_ptr<float> dd = thrust::device_pointer_cast(dists.ptr());
thrust::device_ptr<int> id = thrust::device_pointer_cast(indices.ptr());
if( knn==1 ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::SingleResultSet<float>(epsError),distance);
// hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_nodes_)[0])),
// thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
// thrust::raw_pointer_cast(&queriesDev[0]),
// queries.stride,
// thrust::raw_pointer_cast(&indicesDev[0]),
// thrust::raw_pointer_cast(&distsDev[0]),
// queries.rows, epsError);
//
}
else {
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnResultSet<float, true>(knn,sorted,epsError)
, distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnResultSet<float, false>(knn,sorted,epsError),
distance
);
}
}
thrust::transform(id, id+knn*queries.rows, id, map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
}
}
template< typename Distance>
int KDTreeCuda3dIndex<Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const
{
    // assert(indices.rows >= queries.rows);
// assert(dists.rows >= queries.rows);
int max_neighbors = params.max_neighbors;
bool sorted = params.sorted;
bool use_heap = params.use_heap;
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
int istride=queries.stride/sizeof(ElementType);
thrust::device_vector<float> queriesDev(istride* queries.rows,0);
thrust::copy( queries.ptr(), queries.ptr()+istride*queries.rows, queriesDev.begin() );
thrust::device_vector<int> countsDev(queries.rows);
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
1,
thrust::raw_pointer_cast(&countsDev[0]),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,max_neighbors),
distance
);
thrust::host_vector<int> counts_host=countsDev;
if( max_neighbors!=0 ) { // we'll need this later, but the exclusive_scan will change the array
for( size_t i=0; i<queries.rows; i++ ) {
int count = counts_host[i];
if( count > 0 ) {
indices[i].resize(count);
dists[i].resize(count);
}
else {
indices[i].clear();
dists[i].clear();
}
}
}
int neighbors_last_elem = countsDev.back();
thrust::exclusive_scan( countsDev.begin(), countsDev.end(), countsDev.begin() );
size_t total_neighbors=neighbors_last_elem+countsDev.back();
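    // Worked example with assumed counts: per-query counts [2, 0, 3] become
    // [0, 2, 2] after the exclusive scan, i.e. each query's write offset into
    // the flat result buffers; the previously saved last element (3) plus the
    // new back() (2) gives total_neighbors = 5 = 2 + 0 + 3.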
if( max_neighbors==0 ) return total_neighbors;
thrust::device_vector<int> indicesDev(total_neighbors,-1);
thrust::device_vector<float> distsDev(total_neighbors,std::numeric_limits<float>::infinity());
if( max_neighbors<0 ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusResultSet<float>(radius,thrust::raw_pointer_cast(&countsDev[0]),sorted), distance);
}
else {
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusKnnResultSet<float, true>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),sorted), distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusKnnResultSet<float, false>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),sorted), distance);
}
}
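        // The tree builder reorders the points internally, so translate the internal
        // indices back to the caller's original point indices before copying to the host.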
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::host_vector<int> indices_temp = indicesDev;
thrust::host_vector<float> dists_temp = distsDev;
int buffer_index=0;
for( size_t i=0; i<queries.rows; i++ ) {
for( size_t j=0; j<counts_host[i]; j++ ) {
dists[i][j]=dists_temp[buffer_index];
indices[i][j]=indices_temp[buffer_index];
++buffer_index;
}
}
return buffer_index;
}
//! used in the radius search to count the total number of neighbors
struct isNotMinusOne
{
__host__ __device__
bool operator() ( int i ){
return i!=-1;
}
};
template< typename Distance>
int KDTreeCuda3dIndex< Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const
{
int max_neighbors = params.max_neighbors;
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows || max_neighbors==0 );
assert(indices.stride==dists.stride || max_neighbors==0 );
assert( indices.cols==indices.stride/sizeof(int) );
assert(dists.rows >= queries.rows || max_neighbors==0 );
bool sorted = params.sorted;
bool matrices_on_gpu = params.matrices_in_gpu_ram;
float epsError = 1+params.eps;
bool use_heap = params.use_heap;
int istride=queries.stride/sizeof(ElementType);
int ostride= indices.stride/4;
if( max_neighbors<0 ) max_neighbors=indices.cols;
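        // Two code paths: host-resident query/result matrices are staged through
        // temporary device vectors, while matrices already in GPU RAM are passed
        // to the kernels directly via device pointers.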
if( !matrices_on_gpu ) {
thrust::device_vector<float> queriesDev(istride* queries.rows,0);
thrust::copy( queries.ptr(), queries.ptr()+istride*queries.rows, queriesDev.begin() );
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
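            // max_neighbors==0 is the "count only" mode: the kernel stores one neighbor
            // count per query and the reduction below returns the grand total.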
if( max_neighbors== 0 ) {
thrust::device_vector<int> indicesDev(queries.rows* ostride);
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,-1),
distance
);
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
thrust::device_vector<float> distsDev(queries.rows* max_neighbors);
thrust::device_vector<int> indicesDev(queries.rows* max_neighbors);
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnRadiusResultSet<float, true>(max_neighbors,sorted,epsError, radius), distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnRadiusResultSet<float, false>(max_neighbors,sorted,epsError, radius), distance);
}
thrust::copy( distsDev.begin(), distsDev.end(), dists.ptr() );
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
return thrust::count_if(indicesDev.begin(), indicesDev.end(), isNotMinusOne() );
}
else {
thrust::device_ptr<float> qd=thrust::device_pointer_cast(queries.ptr());
thrust::device_ptr<float> dd=thrust::device_pointer_cast(dists.ptr());
thrust::device_ptr<int> id=thrust::device_pointer_cast(indices.ptr());
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
if( max_neighbors== 0 ) {
thrust::device_vector<int> indicesDev(queries.rows* indices.stride);
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,-1),
distance
);
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnRadiusResultSet<float, true>(max_neighbors,sorted,epsError, radius), distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnRadiusResultSet<float, false>(max_neighbors,sorted,epsError, radius), distance);
}
thrust::transform(id, id+max_neighbors*queries.rows, id, map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
return thrust::count_if(id, id+max_neighbors*queries.rows, isNotMinusOne() );
}
}
template<typename Distance>
void KDTreeCuda3dIndex<Distance>::uploadTreeToGpu()
{
// just make sure that no weird alignment stuff is going on...
// shouldn't, but who knows
        // (I would make this a (boost) static assertion, but so far flann seems to avoid boost)
// assert( sizeof( KdTreeCudaPrivate::GpuNode)==sizeof( Node ) );
delete gpu_helper_;
gpu_helper_ = new GpuHelper;
gpu_helper_->gpu_points_=new thrust::device_vector<float4>(size_);
thrust::device_vector<float4> tmp(size_);
if( get_param(index_params_,"input_is_gpu_float4",false) ) {
assert( dataset_.cols == 3 && dataset_.stride==4*sizeof(float));
thrust::copy( thrust::device_pointer_cast((float4*)dataset_.ptr()),thrust::device_pointer_cast((float4*)(dataset_.ptr()))+size_,tmp.begin());
}
else {
// k is limited to 4 -> use 128bit-alignment regardless of dimensionality
// makes cpu search about 5% slower, but gpu can read a float4 w/ a single instruction
// (vs a float2 and a float load for a float3 value)
// pad data directly to avoid having to copy and re-format the data when
// copying it to the GPU
data_ = flann::Matrix<ElementType>(new ElementType[size_*4], size_, dim_,4*4);
for (size_t i=0; i<size_; ++i) {
for (size_t j=0; j<dim_; ++j) {
data_[i][j] = dataset_[i][j];
}
for (size_t j=dim_; j<4; ++j) {
data_[i][j] = 0;
}
}
thrust::copy((float4*)data_.ptr(),(float4*)(data_.ptr())+size_,tmp.begin());
}
CudaKdTreeBuilder builder( tmp, leaf_max_size_ );
builder.buildTree();
gpu_helper_->gpu_splits_ = builder.splits_;
gpu_helper_->gpu_aabb_min_ = builder.aabb_min_;
gpu_helper_->gpu_aabb_max_ = builder.aabb_max_;
gpu_helper_->gpu_child1_ = builder.child1_;
gpu_helper_->gpu_parent_=builder.parent_;
gpu_helper_->gpu_vind_=builder.index_x_;
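        // Reorder the padded points into the order produced by the tree builder so that
        // each leaf references a contiguous range of gpu_points_; gpu_vind_ keeps the
        // mapping back to the original dataset indices.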
thrust::gather( builder.index_x_->begin(), builder.index_x_->end(), tmp.begin(), gpu_helper_->gpu_points_->begin());
// gpu_helper_->gpu_nodes_=new thrust::device_vector<KdTreeCudaPrivate::GpuNode>(node_count_);
// gpu_helper_->gpu_vind_=new thrust::device_vector<int>(size_);
// thrust::copy( (KdTreeCudaPrivate::GpuNode*)&(tree_[0]), ((KdTreeCudaPrivate::GpuNode*)&(tree_[0]))+tree_.size(), gpu_helper_->gpu_nodes_->begin());
// thrust::copy(vind_.begin(),vind_.end(),gpu_helper_->gpu_vind_->begin());
// buildGpuTree();
}
template<typename Distance>
void KDTreeCuda3dIndex<Distance>::clearGpuBuffers()
{
delete gpu_helper_;
gpu_helper_=0;
}
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L2<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L2<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L2<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L2<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L2<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L2<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const;
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L2_Simple<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L2_Simple<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L2_Simple<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const;
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L1<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L1<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L1<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L1<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L1<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L1<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const;
}
| 07f47142e9843752e35975ad50522ee898004a37.cu | /**********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2011 Andreas Muetzel ([email protected]). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#include "kdtree_cuda_3d_index.h"
#include <flann/algorithms/dist.h>
#include <flann/util/cuda/result_set.h>
// #define THRUST_DEBUG 1
#include <cuda.h>
#include <thrust/gather.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <vector_types.h>
#include <flann/util/cutil_math.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <flann/util/cuda/heap.h>
#include <thrust/scan.h>
#include <thrust/count.h>
#include <thrust/gather.h>
#include <flann/algorithms/kdtree_cuda_builder.h>
#include <vector_types.h>
namespace flann
{
namespace KdTreeCudaPrivate
{
template< typename GPUResultSet, typename Distance >
__device__
void searchNeighbors(const cuda::kd_tree_builder_detail::SplitInfo* splits,
const int* child1,
const int* parent,
const float4* aabbLow,
const float4* aabbHigh, const float4* elements, const float4& q, GPUResultSet& result, const Distance& distance = Distance() )
{
bool backtrack=false;
int lastNode=-1;
int current=0;
cuda::kd_tree_builder_detail::SplitInfo split;
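            // Stackless depth-first traversal: the parent pointers together with the
            // 'backtrack' flag replace an explicit per-thread stack. The far child is
            // only visited if its AABB could still contain a point within the current
            // worst distance.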
while(true) {
if( current==-1 ) break;
split = splits[current];
float diff1;
if( split.split_dim==0 ) diff1=q.x- split.split_val;
else if( split.split_dim==1 ) diff1=q.y- split.split_val;
else if( split.split_dim==2 ) diff1=q.z- split.split_val;
// children are next to each other: leftChild+1 == rightChild
int leftChild= child1[current];
int bestChild=leftChild;
int otherChild=leftChild;
if ((diff1)<0) {
otherChild++;
}
else {
bestChild++;
}
if( !backtrack ) {
/* If this is a leaf node, then do check and return. */
if (leftChild==-1) {
for (int i=split.left; i<split.right; ++i) {
float dist=distance.dist(elements[i],q);
result.insert(i,dist);
}
backtrack=true;
lastNode=current;
current=parent[current];
}
else { // go to closer child node
lastNode=current;
current=bestChild;
}
}
else { // continue moving back up the tree or visit far node?
// minimum possible distance between query point and a point inside the AABB
float mindistsq=0;
float4 aabbMin=aabbLow[otherChild];
float4 aabbMax=aabbHigh[otherChild];
if( q.x < aabbMin.x ) mindistsq+=distance.axisDist(q.x, aabbMin.x);
else if( q.x > aabbMax.x ) mindistsq+=distance.axisDist(q.x, aabbMax.x);
if( q.y < aabbMin.y ) mindistsq+=distance.axisDist(q.y, aabbMin.y);
else if( q.y > aabbMax.y ) mindistsq+=distance.axisDist(q.y, aabbMax.y);
if( q.z < aabbMin.z ) mindistsq+=distance.axisDist(q.z, aabbMin.z);
else if( q.z > aabbMax.z ) mindistsq+=distance.axisDist(q.z, aabbMax.z);
// the far node was NOT the last node (== not visited yet) AND there could be a closer point in it
if(( lastNode==bestChild) && (mindistsq <= result.worstDist() ) ) {
lastNode=current;
current=otherChild;
backtrack=false;
}
else {
lastNode=current;
current=parent[current];
}
}
}
}
template< typename GPUResultSet, typename Distance >
__global__
void nearestKernel(const cuda::kd_tree_builder_detail::SplitInfo* splits,
const int* child1,
const int* parent,
const float4* aabbMin,
const float4* aabbMax, const float4* elements, const float* query, int stride, int resultStride, int* resultIndex, float* resultDist, int querysize, GPUResultSet result, Distance dist = Distance())
{
typedef float DistanceType;
typedef float ElementType;
// typedef DistanceType float;
size_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if( tid >= querysize ) return;
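        // One thread per query point: load the query as a float4 and let the result
        // set object decide where this thread's output row starts.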
float4 q = make_float4(query[tid*stride],query[tid*stride+1],query[tid*stride+2],0);
result.setResultLocation( resultDist, resultIndex, tid, resultStride );
searchNeighbors(splits,child1,parent,aabbMin,aabbMax,elements, q, result, dist);
result.finish();
}
}
//! contains some pointers that use cuda data types and that cannot be easily
//! forward-declared.
//! basically it contains all GPU buffers
template<typename Distance>
struct KDTreeCuda3dIndex<Distance>::GpuHelper
{
thrust::device_vector< cuda::kd_tree_builder_detail::SplitInfo >* gpu_splits_;
thrust::device_vector< int >* gpu_parent_;
thrust::device_vector< int >* gpu_child1_;
thrust::device_vector< float4 >* gpu_aabb_min_;
thrust::device_vector< float4 >* gpu_aabb_max_;
thrust::device_vector<float4>* gpu_points_;
thrust::device_vector<int>* gpu_vind_;
GpuHelper() : gpu_splits_(0), gpu_parent_(0), gpu_child1_(0), gpu_aabb_min_(0), gpu_aabb_max_(0), gpu_points_(0), gpu_vind_(0){
}
~GpuHelper()
{
delete gpu_splits_;
gpu_splits_=0;
delete gpu_parent_;
gpu_parent_=0;
delete gpu_child1_;
gpu_child1_=0;
delete gpu_aabb_max_;
gpu_aabb_max_=0;
delete gpu_aabb_min_;
gpu_aabb_min_=0;
delete gpu_vind_;
gpu_vind_=0;
delete gpu_points_;
gpu_points_=0;
}
};
//! thrust transform functor
//! transforms indices in the internal data set back to the original indices
struct map_indices
{
const int* v_;
map_indices(const int* v) : v_(v) {
}
__host__ __device__
float operator() (const int&i) const
{
if( i>= 0 ) return v_[i];
else return i;
}
};
//! implementation of L2 distance for the CUDA kernels
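    // Note: dist() returns the squared Euclidean distance (no sqrt is taken), so
    // callers are expected to work in squared units, as with flann's CPU L2 functor.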
struct CudaL2
{
static float
__host__ __device__
axisDist( float a, float b )
{
return (a-b)*(a-b);
}
static float
__host__ __device__
dist( float4 a, float4 b )
{
float4 diff = a-b;
return dot(diff,diff);
}
};
//! implementation of L1 distance for the CUDA kernels
//! NOT TESTED!
struct CudaL1
{
static float
__host__ __device__
axisDist( float a, float b )
{
return fabs(a-b);
}
static float
__host__ __device__
dist( float4 a, float4 b )
{
        return fabs(a.x-b.x)+fabs(a.y-b.y)+fabs(a.z-b.z)+fabs(a.w-b.w); // all four components need fabs for a true L1 distance
}
};
//! used to adapt CPU and GPU distance types.
//! specializations define the ::type as their corresponding GPU distance type
//! \see GpuDistance< L2<float> >, GpuDistance< L2_Simple<float> >
template< class Distance >
struct GpuDistance
{
};
template<>
struct GpuDistance< L2<float> >
{
typedef CudaL2 type;
};
template<>
struct GpuDistance< L2_Simple<float> >
{
typedef CudaL2 type;
};
template<>
struct GpuDistance< L1<float> >
{
typedef CudaL1 type;
};
template< typename Distance >
void KDTreeCuda3dIndex<Distance>::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const
{
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(int(indices.cols) >= knn);
assert( dists.cols == indices.cols && dists.stride==indices.stride );
int istride=queries.stride/sizeof(ElementType);
int ostride=indices.stride/4;
bool matrices_on_gpu = params.matrices_in_gpu_ram;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
float epsError = 1+params.eps;
bool sorted = params.sorted;
bool use_heap = params.use_heap;
typename GpuDistance<Distance>::type distance;
// std::cout<<" search: "<<std::endl;
// std::cout<<" rows: "<<indices.rows<<" "<<dists.rows<<" "<<queries.rows<<std::endl;
// std::cout<<" cols: "<<indices.cols<<" "<<dists.cols<<" "<<queries.cols<<std::endl;
// std::cout<<" stride: "<<indices.stride<<" "<<dists.stride<<" "<<queries.stride<<std::endl;
// std::cout<<" stride2:"<<istride<<" "<<ostride<<std::endl;
// std::cout<<" knn:"<<knn<<" matrices_on_gpu:"<<matrices_on_gpu<<std::endl;
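        // knn==1 uses the cheaper SingleResultSet; for larger knn a KnnResultSet is
        // used, with params.use_heap selecting the heap-based or array-based variant.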
if( !matrices_on_gpu ) {
thrust::device_vector<float> queriesDev(istride* queries.rows,0);
thrust::copy( queries.ptr(), queries.ptr()+istride*queries.rows, queriesDev.begin() );
thrust::device_vector<float> distsDev(queries.rows* ostride);
thrust::device_vector<int> indicesDev(queries.rows* ostride);
if( knn==1 ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::SingleResultSet<float>(epsError),distance);
// KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_nodes_)[0])),
// thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
// thrust::raw_pointer_cast(&queriesDev[0]),
// queries.stride,
// thrust::raw_pointer_cast(&indicesDev[0]),
// thrust::raw_pointer_cast(&distsDev[0]),
// queries.rows, epsError);
//
}
else {
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnResultSet<float, true>(knn,sorted,epsError)
, distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnResultSet<float, false>(knn,sorted,epsError),
distance
);
}
}
thrust::copy( distsDev.begin(), distsDev.end(), dists.ptr() );
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
}
else {
thrust::device_ptr<float> qd = thrust::device_pointer_cast(queries.ptr());
thrust::device_ptr<float> dd = thrust::device_pointer_cast(dists.ptr());
thrust::device_ptr<int> id = thrust::device_pointer_cast(indices.ptr());
if( knn==1 ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::SingleResultSet<float>(epsError),distance);
// KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_nodes_)[0])),
// thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
// thrust::raw_pointer_cast(&queriesDev[0]),
// queries.stride,
// thrust::raw_pointer_cast(&indicesDev[0]),
// thrust::raw_pointer_cast(&distsDev[0]),
// queries.rows, epsError);
//
}
else {
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnResultSet<float, true>(knn,sorted,epsError)
, distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnResultSet<float, false>(knn,sorted,epsError),
distance
);
}
}
thrust::transform(id, id+knn*queries.rows, id, map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
}
}
template< typename Distance>
int KDTreeCuda3dIndex<Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const
{
        //  assert(indices.rows >= queries.rows);
// assert(dists.rows >= queries.rows);
int max_neighbors = params.max_neighbors;
bool sorted = params.sorted;
bool use_heap = params.use_heap;
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
int istride=queries.stride/sizeof(ElementType);
thrust::device_vector<float> queriesDev(istride* queries.rows,0);
thrust::copy( queries.ptr(), queries.ptr()+istride*queries.rows, queriesDev.begin() );
thrust::device_vector<int> countsDev(queries.rows);
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
1,
thrust::raw_pointer_cast(&countsDev[0]),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,max_neighbors),
distance
);
thrust::host_vector<int> counts_host=countsDev;
if( max_neighbors!=0 ) { // we'll need this later, but the exclusive_scan will change the array
for( size_t i=0; i<queries.rows; i++ ) {
int count = counts_host[i];
if( count > 0 ) {
indices[i].resize(count);
dists[i].resize(count);
}
else {
indices[i].clear();
dists[i].clear();
}
}
}
int neighbors_last_elem = countsDev.back();
thrust::exclusive_scan( countsDev.begin(), countsDev.end(), countsDev.begin() );
size_t total_neighbors=neighbors_last_elem+countsDev.back();
if( max_neighbors==0 ) return total_neighbors;
thrust::device_vector<int> indicesDev(total_neighbors,-1);
thrust::device_vector<float> distsDev(total_neighbors,std::numeric_limits<float>::infinity());
if( max_neighbors<0 ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusResultSet<float>(radius,thrust::raw_pointer_cast(&countsDev[0]),sorted), distance);
}
else {
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusKnnResultSet<float, true>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),sorted), distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusKnnResultSet<float, false>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),sorted), distance);
}
}
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::host_vector<int> indices_temp = indicesDev;
thrust::host_vector<float> dists_temp = distsDev;
int buffer_index=0;
for( size_t i=0; i<queries.rows; i++ ) {
for( size_t j=0; j<counts_host[i]; j++ ) {
dists[i][j]=dists_temp[buffer_index];
indices[i][j]=indices_temp[buffer_index];
++buffer_index;
}
}
return buffer_index;
}
//! used in the radius search to count the total number of neighbors
struct isNotMinusOne
{
__host__ __device__
bool operator() ( int i ){
return i!=-1;
}
};
template< typename Distance>
int KDTreeCuda3dIndex< Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const
{
int max_neighbors = params.max_neighbors;
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows || max_neighbors==0 );
assert(indices.stride==dists.stride || max_neighbors==0 );
assert( indices.cols==indices.stride/sizeof(int) );
assert(dists.rows >= queries.rows || max_neighbors==0 );
bool sorted = params.sorted;
bool matrices_on_gpu = params.matrices_in_gpu_ram;
float epsError = 1+params.eps;
bool use_heap = params.use_heap;
int istride=queries.stride/sizeof(ElementType);
int ostride= indices.stride/4;
if( max_neighbors<0 ) max_neighbors=indices.cols;
if( !matrices_on_gpu ) {
thrust::device_vector<float> queriesDev(istride* queries.rows,0);
thrust::copy( queries.ptr(), queries.ptr()+istride*queries.rows, queriesDev.begin() );
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
if( max_neighbors== 0 ) {
thrust::device_vector<int> indicesDev(queries.rows* ostride);
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,-1),
distance
);
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
thrust::device_vector<float> distsDev(queries.rows* max_neighbors);
thrust::device_vector<int> indicesDev(queries.rows* max_neighbors);
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnRadiusResultSet<float, true>(max_neighbors,sorted,epsError, radius), distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
istride,
ostride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnRadiusResultSet<float, false>(max_neighbors,sorted,epsError, radius), distance);
}
thrust::copy( distsDev.begin(), distsDev.end(), dists.ptr() );
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
return thrust::count_if(indicesDev.begin(), indicesDev.end(), isNotMinusOne() );
}
else {
thrust::device_ptr<float> qd=thrust::device_pointer_cast(queries.ptr());
thrust::device_ptr<float> dd=thrust::device_pointer_cast(dists.ptr());
thrust::device_ptr<int> id=thrust::device_pointer_cast(indices.ptr());
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
if( max_neighbors== 0 ) {
thrust::device_vector<int> indicesDev(queries.rows* indices.stride);
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,-1),
distance
);
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnRadiusResultSet<float, true>(max_neighbors,sorted,epsError, radius), distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
istride,
ostride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnRadiusResultSet<float, false>(max_neighbors,sorted,epsError, radius), distance);
}
thrust::transform(id, id+max_neighbors*queries.rows, id, map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
return thrust::count_if(id, id+max_neighbors*queries.rows, isNotMinusOne() );
}
}
template<typename Distance>
void KDTreeCuda3dIndex<Distance>::uploadTreeToGpu()
{
// just make sure that no weird alignment stuff is going on...
// shouldn't, but who knows
        // (I would make this a (boost) static assertion, but so far flann seems to avoid boost)
// assert( sizeof( KdTreeCudaPrivate::GpuNode)==sizeof( Node ) );
delete gpu_helper_;
gpu_helper_ = new GpuHelper;
gpu_helper_->gpu_points_=new thrust::device_vector<float4>(size_);
thrust::device_vector<float4> tmp(size_);
if( get_param(index_params_,"input_is_gpu_float4",false) ) {
assert( dataset_.cols == 3 && dataset_.stride==4*sizeof(float));
thrust::copy( thrust::device_pointer_cast((float4*)dataset_.ptr()),thrust::device_pointer_cast((float4*)(dataset_.ptr()))+size_,tmp.begin());
}
else {
// k is limited to 4 -> use 128bit-alignment regardless of dimensionality
// makes cpu search about 5% slower, but gpu can read a float4 w/ a single instruction
// (vs a float2 and a float load for a float3 value)
// pad data directly to avoid having to copy and re-format the data when
// copying it to the GPU
data_ = flann::Matrix<ElementType>(new ElementType[size_*4], size_, dim_,4*4);
for (size_t i=0; i<size_; ++i) {
for (size_t j=0; j<dim_; ++j) {
data_[i][j] = dataset_[i][j];
}
for (size_t j=dim_; j<4; ++j) {
data_[i][j] = 0;
}
}
thrust::copy((float4*)data_.ptr(),(float4*)(data_.ptr())+size_,tmp.begin());
}
CudaKdTreeBuilder builder( tmp, leaf_max_size_ );
builder.buildTree();
gpu_helper_->gpu_splits_ = builder.splits_;
gpu_helper_->gpu_aabb_min_ = builder.aabb_min_;
gpu_helper_->gpu_aabb_max_ = builder.aabb_max_;
gpu_helper_->gpu_child1_ = builder.child1_;
gpu_helper_->gpu_parent_=builder.parent_;
gpu_helper_->gpu_vind_=builder.index_x_;
thrust::gather( builder.index_x_->begin(), builder.index_x_->end(), tmp.begin(), gpu_helper_->gpu_points_->begin());
// gpu_helper_->gpu_nodes_=new thrust::device_vector<KdTreeCudaPrivate::GpuNode>(node_count_);
// gpu_helper_->gpu_vind_=new thrust::device_vector<int>(size_);
// thrust::copy( (KdTreeCudaPrivate::GpuNode*)&(tree_[0]), ((KdTreeCudaPrivate::GpuNode*)&(tree_[0]))+tree_.size(), gpu_helper_->gpu_nodes_->begin());
// thrust::copy(vind_.begin(),vind_.end(),gpu_helper_->gpu_vind_->begin());
// buildGpuTree();
}
template<typename Distance>
void KDTreeCuda3dIndex<Distance>::clearGpuBuffers()
{
delete gpu_helper_;
gpu_helper_=0;
}
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L2<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L2<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L2<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L2<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L2<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L2<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const;
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L2_Simple<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L2_Simple<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L2_Simple<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const;
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L1<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L1<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L1<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L1<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L1<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const;
template
int KDTreeCuda3dIndex< flann::L1<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const;
}
|
1ff58d1f008d7764953b534b0c4be97c72dc2ee8.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2015-present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
//#include<iostream>
#include <stdint.h>
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
#define CHECK(cmd) \
{\
hipError_t error = cmd;\
if (error != hipSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", hipGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
__device__ void tile_partition(cooperative_groups::thread_group g)
{
cooperative_groups::thread_group tile8 = cooperative_groups::tiled_partition(g,16);
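    // Note: this partitions the block into tiles of 16 threads, even though the
    // variable (and the printfs below) are named/worded as if the tile size were 8.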
//printf("tile8 size is: %d\n",tile8.size());
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
//printf("offset: %d\n",offset);
printf("thread rank is: %d\n",tile8.thread_rank());
if(offset<8)
//if(tile8.thread_rank() <8)
{
//__syncthreads();
tile8.sync();
printf("I am after tile8.sync()\n");
printf("I am in offset<8\n");
}
else if((offset>7) && (offset<16))
{
printf("I am in offset<16\n");
}
else if((offset>15) && (offset<24))
{
printf("I am in offset<24\n");
}
tile8.sync();
//__syncthreads();
}
__global__ void
//vector_square(float *C_d, float *A_d, size_t N)
thread_partition()
{
/* cooperative_groups::grid_group grid = cooperative_groups::this_grid();
unsigned int rank = grid.thread_rank();
unsigned int grid_size = grid.size();*/
cooperative_groups::thread_group g = cooperative_groups::this_thread_block();
tile_partition(g);
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
}
int main(int argc, char *argv[])
{
CHECK(hipSetDevice(2));
// float *A_d, *C_d;
// float *A_h, *C_h;
//size_t N = 1000000;
size_t N = 32;
size_t Nbytes = N * sizeof(float);
hipDeviceProp_t props;
    CHECK(hipGetDeviceProperties(&props, 2/*deviceID*/));  // query the device selected by hipSetDevice above
printf ("info: running on device %s\n", props.name);
/* printf ("info: copy Host2Device\n");
CHECK ( hipMemcpy(A_d, A_h, Nbytes, hipMemcpyHostToDevice));
int max_blocks_per_sm;
CHECK( hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks_per_sm,
vector_square, 32, 0));*/
const unsigned threadsPerBlock = 64;
    const unsigned blocks = (N + threadsPerBlock - 1)/threadsPerBlock;  // round up: N=32 with 64 threads/block must still launch one block
printf ("info: launch 'vector_square' kernel\n");
// vector_square <<<blocks, threadsPerBlock>>> (C_d, A_d, N);
//CHECK(hipDeviceSynchronize());
void *coop_params=NULL;
/*coop_params[0]=(void*)&C_d,
coop_params[1]=(void*)&A_d;
coop_params[2]=(void*)&N;
hipStream_t stream;
CHECK(hipStreamCreate(&stream));*/
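    // A cooperative launch keeps all blocks of the grid resident at the same time,
    // which a grid_group-wide sync (see the commented-out grid code in the kernel)
    // would require; the kernel itself takes no arguments, so coop_params stays NULL.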
hipError_t errval=(hipLaunchCooperativeKernel((void*)thread_partition,blocks,threadsPerBlock,&coop_params,0,0));
//hipError_t errval=(hipLaunchCooperativeKernel((void*)thread_partition,blocks,threadsPerBlock,NULL,0,0));
CHECK(hipDeviceSynchronize());
std::cout<<"errval: "<<hipGetErrorString(errval)<<std::endl;
if (errval != hipSuccess)
{
std::cout << "CUDA error: " << hipGetErrorString(errval);
std::cout << std::endl;
std::cout << " Location: " << __FILE__ << ":" << __LINE__ << std::endl;
exit(errval);
}
printf ("DONE!\n");
return 0;
}
| 1ff58d1f008d7764953b534b0c4be97c72dc2ee8.cu | /*
Copyright (c) 2015-present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
//#include<iostream>
#include <stdint.h>
#include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
#define CHECK(cmd) \
{\
cudaError_t error = cmd;\
if (error != cudaSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", cudaGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
__device__ void tile_partition(cooperative_groups::thread_group g)
{
cooperative_groups::thread_group tile8 = cooperative_groups::tiled_partition(g,16);
//printf("tile8 size is: %d\n",tile8.size());
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
//printf("offset: %d\n",offset);
printf("thread rank is: %d\n",tile8.thread_rank());
if(offset<8)
//if(tile8.thread_rank() <8)
{
//__syncthreads();
tile8.sync();
printf("I am after tile8.sync()\n");
printf("I am in offset<8\n");
}
else if((offset>7) && (offset<16))
{
printf("I am in offset<16\n");
}
else if((offset>15) && (offset<24))
{
printf("I am in offset<24\n");
}
tile8.sync();
//__syncthreads();
}
__global__ void
//vector_square(float *C_d, float *A_d, size_t N)
thread_partition()
{
/* cooperative_groups::grid_group grid = cooperative_groups::this_grid();
unsigned int rank = grid.thread_rank();
unsigned int grid_size = grid.size();*/
cooperative_groups::thread_group g = cooperative_groups::this_thread_block();
tile_partition(g);
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
}
int main(int argc, char *argv[])
{
CHECK(cudaSetDevice(2));
// float *A_d, *C_d;
// float *A_h, *C_h;
//size_t N = 1000000;
size_t N = 32;
size_t Nbytes = N * sizeof(float);
cudaDeviceProp props;
    CHECK(cudaGetDeviceProperties(&props, 2/*deviceID*/));  // query the device selected by cudaSetDevice above
printf ("info: running on device %s\n", props.name);
/* printf ("info: copy Host2Device\n");
CHECK ( cudaMemcpy(A_d, A_h, Nbytes, cudaMemcpyHostToDevice));
int max_blocks_per_sm;
CHECK( cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks_per_sm,
vector_square, 32, 0));*/
const unsigned threadsPerBlock = 64;
    const unsigned blocks = (N + threadsPerBlock - 1)/threadsPerBlock;  // round up: N=32 with 64 threads/block must still launch one block
printf ("info: launch 'vector_square' kernel\n");
// vector_square <<<blocks, threadsPerBlock>>> (C_d, A_d, N);
//CHECK(cudaDeviceSynchronize());
void *coop_params=NULL;
/*coop_params[0]=(void*)&C_d,
coop_params[1]=(void*)&A_d;
coop_params[2]=(void*)&N;
cudaStream_t stream;
CHECK(cudaStreamCreate(&stream));*/
cudaError_t errval=(cudaLaunchCooperativeKernel((void*)thread_partition,blocks,threadsPerBlock,&coop_params,0,0));
//cudaError_t errval=(cudaLaunchCooperativeKernel((void*)thread_partition,blocks,threadsPerBlock,NULL,0,0));
CHECK(cudaDeviceSynchronize());
std::cout<<"errval: "<<cudaGetErrorString(errval)<<std::endl;
if (errval != cudaSuccess)
{
std::cout << "CUDA error: " << cudaGetErrorString(errval);
std::cout << std::endl;
std::cout << " Location: " << __FILE__ << ":" << __LINE__ << std::endl;
exit(errval);
}
printf ("DONE!\n");
return 0;
}
|
5180fc81101ca9834d1decd289cb57ad8ec3450b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "SobelFilter_kernels.h"
// Texture reference for reading image
texture<unsigned char, 2> tex;
extern __shared__ unsigned char LocalBlock[];
static hipArray *array = NULL;
#define RADIUS 1
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
__device__ __host__ float absolute(float v)
{
if(v < 0.0f) return -v;
return v;
}
__device__ unsigned char
ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale )
{
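    // Horz and Vert are the horizontal and vertical 3x3 Sobel gradient responses;
    // the result is the scaled L1 norm of the gradient, clamped to [0, 255].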
short Horz = ur + 2*mr + lr - ul - 2*ml - ll;
short Vert = ul + 2*um + ur - ll - 2*lm - lr;
short Sum = (short) (fScale*(absolute(Horz)+absolute(Vert)));
if ( Sum < 0 ) return 0; else if ( Sum > 0xff ) return 0xff;
return (unsigned char) Sum;
}
__global__ void
SobelShared( uchar4 *pSobelOriginal, unsigned short SobelPitch,
#ifndef FIXED_BLOCKWIDTH
short BlockWidth, short SharedPitch,
#endif
short w, short h, float fScale )
{
short u = 4*blockIdx.x*BlockWidth;
short v = blockIdx.y*blockDim.y + threadIdx.y;
short ib;
int SharedIdx = threadIdx.y * SharedPitch;
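    // Stage this block's image tile plus a RADIUS-pixel apron into shared memory;
    // each thread fetches 4 horizontally adjacent pixels per loop iteration.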
for ( ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x ) {
LocalBlock[SharedIdx+4*ib+0] = tex2D( tex,
(float) (u+4*ib-RADIUS+0), (float) (v-RADIUS) );
LocalBlock[SharedIdx+4*ib+1] = tex2D( tex,
(float) (u+4*ib-RADIUS+1), (float) (v-RADIUS) );
LocalBlock[SharedIdx+4*ib+2] = tex2D( tex,
(float) (u+4*ib-RADIUS+2), (float) (v-RADIUS) );
LocalBlock[SharedIdx+4*ib+3] = tex2D( tex,
(float) (u+4*ib-RADIUS+3), (float) (v-RADIUS) );
}
if ( threadIdx.y < RADIUS*2 ) {
//
// copy trailing RADIUS*2 rows of pixels into shared
//
SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x ) {
LocalBlock[SharedIdx+4*ib+0] = tex2D( tex,
(float) (u+4*ib-RADIUS+0), (float) (v+blockDim.y-RADIUS) );
LocalBlock[SharedIdx+4*ib+1] = tex2D( tex,
(float) (u+4*ib-RADIUS+1), (float) (v+blockDim.y-RADIUS) );
LocalBlock[SharedIdx+4*ib+2] = tex2D( tex,
(float) (u+4*ib-RADIUS+2), (float) (v+blockDim.y-RADIUS) );
LocalBlock[SharedIdx+4*ib+3] = tex2D( tex,
(float) (u+4*ib-RADIUS+3), (float) (v+blockDim.y-RADIUS) );
}
}
__syncthreads();
u >>= 2; // index as uchar4 from here
uchar4 *pSobel = (uchar4 *) (((char *) pSobelOriginal)+v*SobelPitch);
SharedIdx = threadIdx.y * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x ) {
unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0];
unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1];
unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2];
unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0];
unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1];
unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2];
unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0];
unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1];
unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2];
uchar4 out;
out.x = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3];
pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3];
pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3];
out.y = ComputeSobel(pix01, pix02, pix00,
pix11, pix12, pix10,
pix21, pix22, pix20, fScale );
pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4];
pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4];
pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4];
out.z = ComputeSobel( pix02, pix00, pix01,
pix12, pix10, pix11,
pix22, pix20, pix21, fScale );
pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5];
pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5];
pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5];
out.w = ComputeSobel( pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
if ( u+ib < w/4 && v < h ) {
pSobel[u+ib] = out;
}
}
__syncthreads();
}
__global__ void
SobelCopyImage( Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale )
{
unsigned char *pSobel =
(unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch);
for ( int i = threadIdx.x; i < w; i += blockDim.x ) {
pSobel[i] = min( max((tex2D( tex, (float) i, (float) blockIdx.x ) * fscale), 0.f), 255.f);
}
}
__global__ void
SobelTex( Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fScale )
{
unsigned char *pSobel =
(unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch);
for ( int i = threadIdx.x; i < w; i += blockDim.x ) {
unsigned char pix00 = tex2D( tex, (float) i-1, (float) blockIdx.x-1 );
unsigned char pix01 = tex2D( tex, (float) i+0, (float) blockIdx.x-1 );
unsigned char pix02 = tex2D( tex, (float) i+1, (float) blockIdx.x-1 );
unsigned char pix10 = tex2D( tex, (float) i-1, (float) blockIdx.x+0 );
unsigned char pix11 = tex2D( tex, (float) i+0, (float) blockIdx.x+0 );
unsigned char pix12 = tex2D( tex, (float) i+1, (float) blockIdx.x+0 );
unsigned char pix20 = tex2D( tex, (float) i-1, (float) blockIdx.x+1 );
unsigned char pix21 = tex2D( tex, (float) i+0, (float) blockIdx.x+1 );
unsigned char pix22 = tex2D( tex, (float) i+1, (float) blockIdx.x+1 );
pSobel[i] = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp)
{
hipChannelFormatDesc desc;
if (Bpp == 1) {
desc = hipCreateChannelDesc<unsigned char>();
} else {
desc = hipCreateChannelDesc<uchar4>();
}
cutilSafeCall(hipMallocArray(&array, &desc, iw, ih));
cutilSafeCall(hipMemcpyToArray(array, 0, 0, data, Bpp*sizeof(Pixel)*iw*ih, hipMemcpyHostToDevice));
}
extern "C" void deleteTexture(void)
{
cutilSafeCall(hipFreeArray(array));
}
// Wrapper for the __global__ call that sets up the texture and threads
extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale)
{
cutilSafeCall(hipBindTextureToArray(tex, array));
switch ( mode ) {
case SOBELDISPLAY_IMAGE:
hipLaunchKernelGGL(( SobelCopyImage), dim3(ih), dim3(384), 0, 0, odata, iw, iw, ih, fScale );
break;
case SOBELDISPLAY_SOBELTEX:
hipLaunchKernelGGL(( SobelTex), dim3(ih), dim3(384), 0, 0, odata, iw, iw, ih, fScale );
break;
case SOBELDISPLAY_SOBELSHARED:
{
dim3 threads(16,4);
#ifndef FIXED_BLOCKWIDTH
int BlockWidth = 80; // must be divisible by 16 for coalescing
#endif
dim3 blocks = dim3(iw/(4*BlockWidth)+(0!=iw%(4*BlockWidth)),
ih/threads.y+(0!=ih%threads.y));
int SharedPitch = ~0x3f&(4*(BlockWidth+2*RADIUS)+0x3f);
int sharedMem = SharedPitch*(threads.y+2*RADIUS);
// for the shared kernel, width must be divisible by 4
iw &= ~3;
hipLaunchKernelGGL(( SobelShared), dim3(blocks), dim3(threads), sharedMem, 0, (uchar4 *) odata,
iw,
#ifndef FIXED_BLOCKWIDTH
BlockWidth, SharedPitch,
#endif
iw, ih, fScale );
}
break;
}
cutilSafeCall(hipUnbindTexture(tex));
}
| 5180fc81101ca9834d1decd289cb57ad8ec3450b.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "SobelFilter_kernels.h"
// Texture reference for reading image
texture<unsigned char, 2> tex;
extern __shared__ unsigned char LocalBlock[];
static cudaArray *array = NULL;
#define RADIUS 1
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
__device__ __host__ float absolute(float v)
{
if(v < 0.0f) return -v;
return v;
}
__device__ unsigned char
ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale )
{
short Horz = ur + 2*mr + lr - ul - 2*ml - ll;
short Vert = ul + 2*um + ur - ll - 2*lm - lr;
short Sum = (short) (fScale*(absolute(Horz)+absolute(Vert)));
if ( Sum < 0 ) return 0; else if ( Sum > 0xff ) return 0xff;
return (unsigned char) Sum;
}
__global__ void
SobelShared( uchar4 *pSobelOriginal, unsigned short SobelPitch,
#ifndef FIXED_BLOCKWIDTH
short BlockWidth, short SharedPitch,
#endif
short w, short h, float fScale )
{
short u = 4*blockIdx.x*BlockWidth;
short v = blockIdx.y*blockDim.y + threadIdx.y;
short ib;
int SharedIdx = threadIdx.y * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x ) {
LocalBlock[SharedIdx+4*ib+0] = tex2D( tex,
(float) (u+4*ib-RADIUS+0), (float) (v-RADIUS) );
LocalBlock[SharedIdx+4*ib+1] = tex2D( tex,
(float) (u+4*ib-RADIUS+1), (float) (v-RADIUS) );
LocalBlock[SharedIdx+4*ib+2] = tex2D( tex,
(float) (u+4*ib-RADIUS+2), (float) (v-RADIUS) );
LocalBlock[SharedIdx+4*ib+3] = tex2D( tex,
(float) (u+4*ib-RADIUS+3), (float) (v-RADIUS) );
}
if ( threadIdx.y < RADIUS*2 ) {
//
// copy trailing RADIUS*2 rows of pixels into shared
//
SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x ) {
LocalBlock[SharedIdx+4*ib+0] = tex2D( tex,
(float) (u+4*ib-RADIUS+0), (float) (v+blockDim.y-RADIUS) );
LocalBlock[SharedIdx+4*ib+1] = tex2D( tex,
(float) (u+4*ib-RADIUS+1), (float) (v+blockDim.y-RADIUS) );
LocalBlock[SharedIdx+4*ib+2] = tex2D( tex,
(float) (u+4*ib-RADIUS+2), (float) (v+blockDim.y-RADIUS) );
LocalBlock[SharedIdx+4*ib+3] = tex2D( tex,
(float) (u+4*ib-RADIUS+3), (float) (v+blockDim.y-RADIUS) );
}
}
__syncthreads();
u >>= 2; // index as uchar4 from here
uchar4 *pSobel = (uchar4 *) (((char *) pSobelOriginal)+v*SobelPitch);
SharedIdx = threadIdx.y * SharedPitch;
for ( ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x ) {
unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0];
unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1];
unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2];
unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0];
unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1];
unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2];
unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0];
unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1];
unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2];
uchar4 out;
out.x = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3];
pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3];
pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3];
out.y = ComputeSobel(pix01, pix02, pix00,
pix11, pix12, pix10,
pix21, pix22, pix20, fScale );
pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4];
pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4];
pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4];
out.z = ComputeSobel( pix02, pix00, pix01,
pix12, pix10, pix11,
pix22, pix20, pix21, fScale );
pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5];
pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5];
pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5];
out.w = ComputeSobel( pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
if ( u+ib < w/4 && v < h ) {
pSobel[u+ib] = out;
}
}
__syncthreads();
}
__global__ void
SobelCopyImage( Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale )
{
unsigned char *pSobel =
(unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch);
for ( int i = threadIdx.x; i < w; i += blockDim.x ) {
pSobel[i] = min( max((tex2D( tex, (float) i, (float) blockIdx.x ) * fscale), 0.f), 255.f);
}
}
__global__ void
SobelTex( Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fScale )
{
unsigned char *pSobel =
(unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch);
for ( int i = threadIdx.x; i < w; i += blockDim.x ) {
unsigned char pix00 = tex2D( tex, (float) i-1, (float) blockIdx.x-1 );
unsigned char pix01 = tex2D( tex, (float) i+0, (float) blockIdx.x-1 );
unsigned char pix02 = tex2D( tex, (float) i+1, (float) blockIdx.x-1 );
unsigned char pix10 = tex2D( tex, (float) i-1, (float) blockIdx.x+0 );
unsigned char pix11 = tex2D( tex, (float) i+0, (float) blockIdx.x+0 );
unsigned char pix12 = tex2D( tex, (float) i+1, (float) blockIdx.x+0 );
unsigned char pix20 = tex2D( tex, (float) i-1, (float) blockIdx.x+1 );
unsigned char pix21 = tex2D( tex, (float) i+0, (float) blockIdx.x+1 );
unsigned char pix22 = tex2D( tex, (float) i+1, (float) blockIdx.x+1 );
pSobel[i] = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale );
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp)
{
cudaChannelFormatDesc desc;
if (Bpp == 1) {
desc = cudaCreateChannelDesc<unsigned char>();
} else {
desc = cudaCreateChannelDesc<uchar4>();
}
cutilSafeCall(cudaMallocArray(&array, &desc, iw, ih));
cutilSafeCall(cudaMemcpyToArray(array, 0, 0, data, Bpp*sizeof(Pixel)*iw*ih, cudaMemcpyHostToDevice));
}
extern "C" void deleteTexture(void)
{
cutilSafeCall(cudaFreeArray(array));
}
// Wrapper for the __global__ call that sets up the texture and threads
extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale)
{
cutilSafeCall(cudaBindTextureToArray(tex, array));
switch ( mode ) {
case SOBELDISPLAY_IMAGE:
SobelCopyImage<<<ih, 384>>>(odata, iw, iw, ih, fScale );
break;
case SOBELDISPLAY_SOBELTEX:
SobelTex<<<ih, 384>>>(odata, iw, iw, ih, fScale );
break;
case SOBELDISPLAY_SOBELSHARED:
{
dim3 threads(16,4);
#ifndef FIXED_BLOCKWIDTH
int BlockWidth = 80; // must be divisible by 16 for coalescing
#endif
dim3 blocks = dim3(iw/(4*BlockWidth)+(0!=iw%(4*BlockWidth)),
ih/threads.y+(0!=ih%threads.y));
int SharedPitch = ~0x3f&(4*(BlockWidth+2*RADIUS)+0x3f);
int sharedMem = SharedPitch*(threads.y+2*RADIUS);
// for the shared kernel, width must be divisible by 4
iw &= ~3;
SobelShared<<<blocks, threads, sharedMem>>>((uchar4 *) odata,
iw,
#ifndef FIXED_BLOCKWIDTH
BlockWidth, SharedPitch,
#endif
iw, ih, fScale );
}
break;
}
cutilSafeCall(cudaUnbindTexture(tex));
}
|
54f9303236580571efcba751695b6340bc21975a.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection.hpp"
#include "errors.hpp"
#include <stdio.h>
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
inline int cudaCheckErrors(const char * msg)
{
hipError_t __err = hipGetLastError();
if (__err != hipSuccess)
{
printf("CUDA:voxel_backprojection:%s:%s\n",msg, hipGetErrorString(__err));
hipDeviceReset();
return 1;
}
return 0;
}
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTexture(int num_devices,float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream, int nStreamDevice,bool allocate);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArrayDev[6*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Point3D projParamsArrayHost[6*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection)
__constant__ float projSinCosArrayDev[5*PROJ_PER_KERNEL];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
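// Layout sketch of the per-kernel-call constant memory (derived from how the kernel below
// indexes it; shown here only as documentation):
//   projParamsArrayDev[6*j+0..5] = { deltaX, deltaY, deltaZ, xyzOrigin, xyzOffset, source }  for projection j
//   projSinCosArrayDev[5*j+0..4] = { sin(alpha), cos(alpha), COR, DSD, DSO }                 for projection j
// with j = 0 .. PROJ_PER_KERNEL-1 inside the current batch; the host fills the matching
// pinned arrays and copies them with hipMemcpyToSymbolAsync before every kernel launch.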
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex)
{
// Old kernel call signature:
// kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
// We just read in most of the params from the constant memory instead of getting them from the param list.
// This is because we now have MANY params, since single kernel processes more than one projection!
/* __global__ void kernelPixelBackprojectionFDK(const Geometry geo,
* float* image,
* const int indAlpha,
* const Point3D deltaX ,
* const Point3D deltaY,
* const Point3D deltaZ,
* const Point3D xyzOrigin,
* const Point3D xyzOffset,
* const Point3D uv0Offset,
* const float sinalpha,
* const float cosalpha){
*/
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we dont go out of bounds
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArrayDev[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaY = projParamsArrayDev[6*projNumber+1];
Point3D deltaZ = projParamsArrayDev[6*projNumber+2];
Point3D xyzOrigin = projParamsArrayDev[6*projNumber+3];
Point3D xyzOffset = projParamsArrayDev[6*projNumber+4];
Point3D S = projParamsArrayDev[6*projNumber+5];
float sinalpha = projSinCosArrayDev[5*projNumber]; // 5*projNumber because we store 5 floats (sin, cos, COR, DSD, DSO) per projection
float cosalpha = projSinCosArrayDev[5*projNumber+1];
float COR = projSinCosArrayDev[5*projNumber+2];
float DSD = projSinCosArrayDev[5*projNumber+3];
float DSO = projSinCosArrayDev[5*projNumber+4];
float auxCOR=COR/geo.dDetecU;
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-auxCOR;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
float weigth;
float realx,realy;
realx=-(geo.sVoxelX+geo.dVoxelX)*0.5f +indX*geo.dVoxelX +xyzOffset.x;
realy=-(geo.sVoxelY+geo.dVoxelY)*0.5f +indY*geo.dVoxelY +xyzOffset.y+COR;
weigth=__fdividef(DSO+realy*sinalpha-realx*cosalpha,DSO);
weigth=__frcp_rd(weigth*weigth);
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=tex3D<float>(tex, v, u ,indAlpha+0.5f)*weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the updated voxel value (accumulated over MULTIPLE projections) back to the volume
// All the accumulation happened in the local (register) array, avoiding reads/writes from the slow main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
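// Worked example (illustrative only; it restates the launch configuration chosen in
// voxel_backprojection() below) for one 512^3 sub-volume:
//   dim3 block(16,32,1);                               // flat 512-thread tile
//   dim3 grid(512/16, 512/32, 512/VOXELS_PER_THREAD);  // = (32,16,64) = 32768 blocks
// Each thread walks an 8-voxel Z column and accumulates up to PROJ_PER_KERNEL = 32
// projections per launch into that column before writing it back once.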
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
if(cudaCheckErrors("Device query fail")){return 1;}
if (deviceCount == 0) {
//mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
return ERR_NO_CAPABLE_DEVICES;
}
// Check the available devices, and if they are the same
int dev;
checkDevices();
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(deviceCount,geo,nalpha,&split_image,&split_projections);
if(cudaCheckErrors("Error")){return 1;}
//Page-lock host memory so the copies can be asynchronous.
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
// Empirical testing shows that when the image does not need to be split (which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported & split_image>1){
hipHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable);
}
if (isHostRegisterSupported ){
hipHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),hipHostRegisterPortable);
}
if(cudaCheckErrors("Error pinning memory")){return 1;}
// Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now let's allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMalloc((void**)&dimage[dev], num_bytes_img);
if(cudaCheckErrors("hipMalloc fail")){return 1;}
}
//If it is the first time, let's make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));;
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStreamDevice; ++i){
hipStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArrayHost;
hipHostMalloc((void**)&projParamsArrayHost,6*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArrayHost;
hipHostMalloc((void**)&projSinCosArrayHost,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
hipTextureObject_t *texProj;
hipArray **d_cuArrTex;
texProj =(hipTextureObject_t*)malloc(deviceCount*2*sizeof(hipTextureObject_t));
d_cuArrTex =(hipArray**)malloc(deviceCount*2*sizeof(hipArray*));
// Auxiliary host page-locked memory for fast and asynchronous memcpy.
// Start with the main loop. The projection data needs to be allocated and deallocated in the main loop
// because, due to the nature of cudaArrays, we cannot reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very big,
// and therefore allocation time should be negligible; fluctuation of other computations should mask the time.
unsigned long long proj_linear_idx_start;
unsigned int proj_split_overlap_number;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
// Initialize the memory if its the first time.
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemset(dimage[dev],0,num_bytes_img);
if(cudaCheckErrors("memset fail")){return 1;}
}
for( unsigned int proj=0;proj<split_projections;proj++){
// What is the size of the current chunk of projections we need?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if its the last one its probably less
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it in the same amount of kernels we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(proj_split_overlap_number*sizeof(float*));
proj_split_size=(size_t*)malloc(proj_split_overlap_number*sizeof(size_t*));
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Crop the last one, as it's likely not completely divisible.
// Now let's split this for simultaneous memcopy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture(deviceCount,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2)&!proj&!img_slice);// Only allocate if its the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStreamDevice+1]);
}
// Pin the next chunk of projection data, unpin the current one.
for (dev = 0; dev < deviceCount; dev++){
//Safety:
// Depending on the number of GPUs, the case where an image slice has zero height can happen.
// Just break the loop if we reached that point
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
hipSetDevice(dev);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
+proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
// mexPrintf("%u %f \n",i,geoArray[img_slice*deviceCount+dev].alpha);
// mexPrintf("%u \n",currProjNumber_global);
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
projSinCosArrayHost[5*j]=sinalpha; // 5*j because we store 5 floats (sin, cos, COR, DSD, DSO) per projection
projSinCosArrayHost[5*j+1]=cosalpha;
projSinCosArrayHost[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArrayHost[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArrayHost[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
projParamsArrayHost[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection
projParamsArrayHost[6*j+1]=deltaY;
projParamsArrayHost[6*j+2]=deltaZ;
projParamsArrayHost[6*j+3]=xyzOrigin;
projParamsArrayHost[6*j+4]=offOrig;
projParamsArrayHost[6*j+5]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
hipMemcpyToSymbolAsync(projSinCosArrayDev, projSinCosArrayHost, sizeof(float)*5*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]);
hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*6*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]);
hipStreamSynchronize(stream[dev*nStreamDevice]);
hipLaunchKernelGGL(( kernelPixelBackprojectionFDK), dim3(grid),dim3(block),0,stream[dev*nStreamDevice], geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}// END for deviceCount
} // END sub-split of current projection chunk
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
} // END projection splits
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
// We do not need to synchronize because the array deallocators already do.
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
hipMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, hipMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
} // end image splits
if(cudaCheckErrors("Main loop fail")){return 1;}
///////// Cleaning:
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
break;
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDestroyTextureObject(texProj[i*deviceCount+dev]);
hipFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
if(cudaCheckErrors("cudadestroy textures result fail")){return 1;}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipFree(dimage[dev]);
}
hipHostFree(projSinCosArrayHost);
hipHostFree(projParamsArrayHost);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
if (isHostRegisterSupported & split_image>1){
hipHostUnregister(result);
}
if (isHostRegisterSupported){
hipHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]);
if(cudaCheckErrors("hipFree fail")){return 1;}
// hipDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection
//
void checkDevices(void){
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, i.e. the same device model (otherwise a warning is thrown)
int dev;
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
char devicenames[256];
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicenames,deviceProp.name)!=0){
printf("Atb:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275.");
break;
}
}
strncpy(devicenames, deviceProp.name, sizeof(devicenames)); // keep our own copy; deviceProp.name is overwritten on the next iteration
}
}
void splitCTbackprojection(int deviceCount,Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
// We only need to split if we have extra GPUs
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does the entire image fit in the GPU, with some slack for a stack of projections?
else
{
// As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits.
// Lets assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it's on the order of bytes, and we keep 5% of the GPU free just in case. We are safe.
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
}
}
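// Worked example of the split arithmetic above (illustrative numbers only): take one GPU
// with ~3.8 GB usable after the 5% margin, a 512x512 float detector (mem_proj ~ 1 MB) and
// PROJ_PER_KERNEL = 32.
//  * 512^3 float volume: mem_image ~ 512 MB, and 512 MB + 2*32*1 MB < 3.8 GB,
//    so split_image = split_projections = 1 (everything fits).
//  * 1024^3 float volume: mem_image ~ 4 GB > 3.8 GB - 32 MB, so
//    split_image = ceil(4 GB / (3.8 GB - 32 MB)) = 2, and with ~1.8 GB left per split
//    split_projections = ceil(2*32*1 MB / 1.8 GB) = 1.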
void CreateTexture(int num_devices, float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const hipExtent extent =make_hipExtent(geo.nDetecV, geo.nDetecU, nangles);
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(dev);
//hipArray Descriptor
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
//cuda Array
hipMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(dev);
hipMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_hipPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3DAsync(©Params,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(dev);
hipResourceDesc texRes;
memset(&texRes, 0, sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModeLinear;
texDescr.addressMode[0] = hipAddressModeBorder;
texDescr.addressMode[1] = hipAddressModeBorder;
texDescr.addressMode[2] = hipAddressModeBorder;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
//______________________________________________________________________________
//
// Function: createGeoArray
//
// Description: This code generates the geometries needed to split the image properly in
// cases where the entire image does not fit in the memory of the GPU
//______________________________________________________________________________
void createGeoArray(unsigned int image_splits, Geometry geo,Geometry* geoArray, unsigned int nangles){
unsigned int splitsize=(geo.nVoxelZ+image_splits-1)/image_splits;
for(unsigned int sp=0;sp<image_splits;sp++){
geoArray[sp]=geo;
// All of them are splitsize, except possibly the last one.
geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: max(geo.nVoxelZ-splitsize*sp,0);
geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ;
// We need to redefine the offsets, as now each subimage is not aligned in the origin.
geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float));
for (unsigned int i=0;i<nangles;i++){
geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2;
}
}
}
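// Worked example (illustrative): geo.nVoxelZ = 512 split over image_splits = 3 gives
// splitsize = ceil(512/3) = 171, so the three sub-geometries get 171, 171 and 170 slices,
// and each offOrigZ[i] is shifted by sp*171*dVoxelZ (plus the half-size terms) so that every
// sub-volume is expressed around its own centre rather than the full volume's centre.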
//______________________________________________________________________________
//
// Function: freeGeoArray
//
// Description: Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
void freeGeoArray(unsigned int splits,Geometry* geoArray){
for(unsigned int sp=0;sp<splits;sp++){
free(geoArray[sp].offOrigZ);
}
free(geoArray);
}
//______________________________________________________________________________
//
// Function: computeDeltasCube
//
// Description: Computes relative increments for each projection (volume rotation).
// Increments get passed to the backprojection kernel.
//______________________________________________________________________________
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate the image (this is equivalent to rotating the source and detector): RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
source.x=geo.DSD[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
}
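// Documentation note (no code change): with these increments, the kernel above reconstructs
// the rotated, detector-scaled position of voxel (indX,indY,indZ) as
//   P = xyzOrigin + indX*deltaX + indY*deltaY + indZ*deltaZ,
// i.e. one multiply-add per axis and no per-voxel trigonometry.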
void eulerZYZT(Geometry geo, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x = auxPoint.x*(cos(geo.psi)*cos(geo.theta)*cos(geo.alpha)-sin(geo.psi)*sin(geo.alpha))
+auxPoint.y*(-cos(geo.psi)*cos(geo.theta)*sin(geo.alpha)-sin(geo.psi)*cos(geo.alpha))
+auxPoint.z*cos(geo.psi)*sin(geo.theta);
point->y = auxPoint.x*(sin(geo.psi)*cos(geo.theta)*cos(geo.alpha)+cos(geo.psi)*sin(geo.alpha))
+auxPoint.y*(-sin(geo.psi)*cos(geo.theta)*sin(geo.alpha)+cos(geo.psi)*cos(geo.alpha))
+auxPoint.z*sin(geo.psi)*sin(geo.theta);
point->z =-auxPoint.x*sin(geo.theta)*cos(geo.alpha)
+auxPoint.y*sin(geo.theta)*sin(geo.alpha)
+auxPoint.z*cos(geo.theta);
}
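// Documentation sketch (derived by reading the coefficients above row by row): this routine
// applies the ZYZ Euler rotation
//   M = Rz(geo.psi) * Ry(geo.theta) * Rz(geo.alpha)
// to the point, using the angles currently stored in geo (which voxel_backprojection() has
// already negated before computeDeltasCube() is called).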
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
printf("voxel_backprojection:voxel_backprojection:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
| 54f9303236580571efcba751695b6340bc21975a.cu | /*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection.hpp"
#include "errors.hpp"
#include <stdio.h>
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
inline int cudaCheckErrors(const char * msg)
{
cudaError_t __err = cudaGetLastError();
if (__err != cudaSuccess)
{
printf("CUDA:voxel_backprojection:%s:%s\n",msg, cudaGetErrorString(__err));
cudaDeviceReset();
return 1;
}
return 0;
}
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTexture(int num_devices,float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream, int nStreamDevice,bool allocate);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArrayDev[6*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Point3D projParamsArrayHost[6*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection)
__constant__ float projSinCosArrayDev[5*PROJ_PER_KERNEL];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex)
{
// Old kernel call signature:
// kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
// We just read in most of the params from the constant memory instead of getting them from the param list.
// This is because we now have MANY params, since single kernel processes more than one projection!
/* __global__ void kernelPixelBackprojectionFDK(const Geometry geo,
* float* image,
* const int indAlpha,
* const Point3D deltaX ,
* const Point3D deltaY,
* const Point3D deltaZ,
* const Point3D xyzOrigin,
* const Point3D xyzOffset,
* const Point3D uv0Offset,
* const float sinalpha,
* const float cosalpha){
*/
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we dont go out of bounds
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArrayDev[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaY = projParamsArrayDev[6*projNumber+1];
Point3D deltaZ = projParamsArrayDev[6*projNumber+2];
Point3D xyzOrigin = projParamsArrayDev[6*projNumber+3];
Point3D xyzOffset = projParamsArrayDev[6*projNumber+4];
Point3D S = projParamsArrayDev[6*projNumber+5];
float sinalpha = projSinCosArrayDev[5*projNumber]; // 5*projNumber because we store 5 float values (sin, cos, COR, DSD, DSO) per projection
float cosalpha = projSinCosArrayDev[5*projNumber+1];
float COR = projSinCosArrayDev[5*projNumber+2];
float DSD = projSinCosArrayDev[5*projNumber+3];
float DSO = projSinCosArrayDev[5*projNumber+4];
float auxCOR=COR/geo.dDetecU;
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-auxCOR;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
float weight;
float realx,realy;
realx=-(geo.sVoxelX+geo.dVoxelX)*0.5f +indX*geo.dVoxelX +xyzOffset.x;
realy=-(geo.sVoxelY+geo.dVoxelY)*0.5f +indY*geo.dVoxelY +xyzOffset.y+COR;
weight=__fdividef(DSO+realy*sinalpha-realx*cosalpha,DSO);
weight=__frcp_rd(weight*weight);
// Get the value at the computed (U,V) and multiply by the corresponding weight.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=tex3D<float>(tex, v, u ,indAlpha+0.5f)*weight;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the updated voxel value (accumulated over MULTIPLE projections) back to the volume in main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
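/*
 * For reference: the kernel above unpacks its per-projection parameters from the
 * constant-memory arrays that the host fills before each launch. The packing is
 *   projParamsArrayDev [6*p+0 .. 6*p+5] -> deltaX, deltaY, deltaZ, xyzOrigin, xyzOffset, source (Point3D each)
 *   projSinCosArrayDev [5*p+0 .. 5*p+4] -> sin(alpha), cos(alpha), COR, DSD, DSO (float each)
 * so, for example, projection p of the current set is read as:
 *   Point3D S   = projParamsArrayDev[6*p+5];   // source position
 *   float   DSO = projSinCosArrayDev[5*p+4];   // source-to-origin distance
 */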
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
if(cudaCheckErrors("Device query fail")){return 1;}
if (deviceCount == 0) {
//mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
return ERR_NO_CAPABLE_DEVICES;
}
// Check the available devices, and if they are the same
int dev;
checkDevices();
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(deviceCount,geo,nalpha,&split_image,&split_projections);
if(cudaCheckErrors("Error")){return 1;}
// Page-lock (pin) host memory for asynchronous copies.
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
// Empirical testing shows that when the image is not split (which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported && split_image>1){
cudaHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
}
if (isHostRegisterSupported ){
cudaHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),cudaHostRegisterPortable);
}
if(cudaCheckErrors("Error pinning memory")){return 1;}
// Create the arrays for the geometry. The main difference is that geo.offOrigZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now let's allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMalloc((void**)&dimage[dev], num_bytes_img);
if(cudaCheckErrors("cudaMalloc fail")){return 1;}
}
//If it is the first time, let's make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStreamDevice; ++i){
cudaStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArrayHost;
cudaMallocHost((void**)&projParamsArrayHost,6*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArrayHost;
cudaMallocHost((void**)&projSinCosArrayHost,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
cudaTextureObject_t *texProj;
cudaArray **d_cuArrTex;
texProj =(cudaTextureObject_t*)malloc(deviceCount*2*sizeof(cudaTextureObject_t));
d_cuArrTex =(cudaArray**)malloc(deviceCount*2*sizeof(cudaArray*));
// Auxiliary host page-locked memory for fast and asynchronous memcpy.
// Start with the main loop. The projection data needs to be allocated and deallocated inside the main loop
// because, due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very big,
// and therefore allocation time should be negligible; fluctuation of other computations should mask the time.
unsigned long long proj_linear_idx_start;
unsigned int proj_split_overlap_number;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
// Initialize the memory if it's the first time.
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemset(dimage[dev],0,num_bytes_img);
if(cudaCheckErrors("memset fail")){return 1;}
}
for( unsigned int proj=0;proj<split_projections;proj++){
// What is the size of the current chunk of projections we need?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if it's the last one it's probably smaller
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it into the same number of kernel calls we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(proj_split_overlap_number*sizeof(float*));
proj_split_size=(size_t*)malloc(proj_split_overlap_number*sizeof(size_t));
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Crop the last one, as it's likely not completely divisible.
// Now let's split this for simultaneous memcpy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture(deviceCount,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2) && !proj && !img_slice);// Only allocate if it's the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStreamDevice+1]);
}
// Pin the next chunk of projection data, unpin the current one.
for (dev = 0; dev < deviceCount; dev++){
//Safety:
// Depending on the number of GPUs, the case where an image slice has zero height can happen.
// Just break the loop if we reached that point
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
cudaSetDevice(dev);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
+proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
// mexPrintf("%u %f \n",i,geoArray[img_slice*deviceCount+dev].alpha);
// mexPrintf("%u \n",currProjNumber_global);
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
projSinCosArrayHost[5*j]=sinalpha; // 5*j because we store 5 float values (sin, cos, COR, DSD, DSO) per projection
projSinCosArrayHost[5*j+1]=cosalpha;
projSinCosArrayHost[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArrayHost[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArrayHost[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
projParamsArrayHost[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection
projParamsArrayHost[6*j+1]=deltaY;
projParamsArrayHost[6*j+2]=deltaZ;
projParamsArrayHost[6*j+3]=xyzOrigin;
projParamsArrayHost[6*j+4]=offOrig;
projParamsArrayHost[6*j+5]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
cudaMemcpyToSymbolAsync(projSinCosArrayDev, projSinCosArrayHost, sizeof(float)*5*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*6*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
kernelPixelBackprojectionFDK<<<grid,block,0,stream[dev*nStreamDevice]>>>(geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}// END for deviceCount
} // END sub-split of current projection chunk
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
} // END projection splits
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
// We do not need to synchronize because the array deallocators already do.
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
cudaMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, cudaMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
} // end image splits
if(cudaCheckErrors("Main loop fail")){return 1;}
///////// Cleaning:
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
break;
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDestroyTextureObject(texProj[i*deviceCount+dev]);
cudaFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
if(cudaCheckErrors("cudadestroy textures result fail")){return 1;}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaFree(dimage[dev]);
}
cudaFreeHost(projSinCosArrayHost);
cudaFreeHost(projParamsArrayHost);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
if (isHostRegisterSupported && split_image>1){
cudaHostUnregister(result);
}
if (isHostRegisterSupported){
cudaHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]);
if(cudaCheckErrors("cudaFree fail")){return 1;}
// cudaDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection
//
void checkDevices(void){
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, i.e. the same model (otherwise a warning is thrown)
int dev;
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
char devicenames[256];
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
// Compare against a copy of the previous device name; assigning the pointer directly
// would make both strcmp() arguments point into the same (overwritten) struct.
if (strcmp(devicenames,deviceProp.name)!=0){
printf("Atb:GPUselect: Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.\n");
break;
}
}
strncpy(devicenames,deviceProp.name,sizeof(devicenames)-1);
devicenames[sizeof(devicenames)-1]='\0';
}
}
void splitCTbackprojection(int deviceCount,Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
// We don't know if the devices are being used, so let's check and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
// We only need to split if we have extra GPUs
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does all the image fit in the GPU, with some slack for a stack of projections??
else
{
// As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits.
// Let's assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it's in the order of bytes, and we keep 5% of the GPU free just in case. We are safe.
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
}
}
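/*
 * Worked example of the split logic above (all numbers are illustrative assumptions):
 * a 512^3 float volume gives mem_image = 512*512*512*4 B = 512 MiB and a 512x512 float
 * detector gives mem_proj = 1 MiB. Assuming PROJ_PER_KERNEL = 32, one GPU and
 * mem_GPU_global = 7.6 GiB (already scaled to 95% of free memory by checkFreeMemory):
 *   mem_image/deviceCount + mem_proj*PROJ_PER_KERNEL*2 = 512 MiB + 64 MiB < 7.6 GiB
 * so split_image = 1 and split_projections = 1, i.e. everything fits and the main loop
 * runs a single image slice with a single projection chunk.
 */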
void CreateTexture(int num_devices, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const cudaExtent extent =make_cudaExtent(geo.nDetecV, geo.nDetecU, nangles);
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(dev);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(dev);
cudaMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(©Params,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(dev);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
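/*
 * Note on usage: the caller keeps two sets of cudaArrays/texture objects per GPU and
 * alternates between them with the (proj_block_split%2)*deviceCount index (double buffering),
 * so the 3D copies issued here on stream[dev*nStreamDevice+1] can overlap with the
 * backprojection kernels of the other buffer running on stream[dev*nStreamDevice].
 * That is also why the "allocate" flag is only true for the first two calls.
 */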
//______________________________________________________________________________
//
// Function: createGeoArray
//
// Description: This code generates the geometries needed to split the image properly in
// cases where the entire image does not fit in the memory of the GPU
//______________________________________________________________________________
void createGeoArray(unsigned int image_splits, Geometry geo,Geometry* geoArray, unsigned int nangles){
unsigned int splitsize=(geo.nVoxelZ+image_splits-1)/image_splits;
for(unsigned int sp=0;sp<image_splits;sp++){
geoArray[sp]=geo;
// All of them are splitsize, except possibly the last one
geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: max(geo.nVoxelZ-splitsize*sp,0);
geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ;
// We need to redefine the offsets, as now each subimage is not aligned in the origin.
geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float));
for (unsigned int i=0;i<nangles;i++){
geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2;
}
}
}
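/*
 * Worked example (illustrative numbers): for geo.nVoxelZ = 100, dVoxelZ = 1 and image_splits = 3,
 * splitsize = ceil(100/3) = 34, so the sub-volumes get nVoxelZ = 34, 34, 32. With geo.offOrigZ[i] = 0
 * the recomputed offsets are
 *   sp = 0: -50 +  0 + 17 = -33
 *   sp = 1: -50 + 34 + 17 =   1
 *   sp = 2: -50 + 68 + 16 =  34
 * i.e. the centres of the three slabs that tile the original [-50, 50] z-extent.
 */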
//______________________________________________________________________________
//
// Function: freeGeoArray
//
// Description: Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
void freeGeoArray(unsigned int splits,Geometry* geoArray){
for(unsigned int sp=0;sp<splits;sp++){
free(geoArray[sp].offOrigZ);
}
free(geoArray);
}
//______________________________________________________________________________
//
// Function: computeDeltasCube
//
// Description: Computes relative increments for each projection (volume rotation).
// Increments get passed to the backprojection kernel.
//______________________________________________________________________________
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate the image with the ZYZ Euler angles (this is equivalent to rotating the source and detector): RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
source.x=geo.DSD[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
}
void eulerZYZT(Geometry geo, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x = auxPoint.x*(cos(geo.psi)*cos(geo.theta)*cos(geo.alpha)-sin(geo.psi)*sin(geo.alpha))
+auxPoint.y*(-cos(geo.psi)*cos(geo.theta)*sin(geo.alpha)-sin(geo.psi)*cos(geo.alpha))
+auxPoint.z*cos(geo.psi)*sin(geo.theta);
point->y = auxPoint.x*(sin(geo.psi)*cos(geo.theta)*cos(geo.alpha)+cos(geo.psi)*sin(geo.alpha))
+auxPoint.y*(-sin(geo.psi)*cos(geo.theta)*sin(geo.alpha)+cos(geo.psi)*cos(geo.alpha))
+auxPoint.z*sin(geo.psi)*sin(geo.theta);
point->z =-auxPoint.x*sin(geo.theta)*cos(geo.alpha)
+auxPoint.y*sin(geo.theta)*sin(geo.alpha)
+auxPoint.z*cos(geo.theta);
}
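/*
 * Written as a matrix, eulerZYZT() applies M = Rz(psi) * Ry(theta) * Rz(alpha) to the point:
 *   | c(psi)c(th)c(al)-s(psi)s(al)   -c(psi)c(th)s(al)-s(psi)c(al)   c(psi)s(th) |
 *   | s(psi)c(th)c(al)+c(psi)s(al)   -s(psi)c(th)s(al)+c(psi)c(al)   s(psi)s(th) |
 *   |          -s(th)c(al)                     s(th)s(al)               c(th)    |
 * i.e. the volume is rotated instead of the source and detector; the trailing "T" in the
 * name presumably indicates that, together with the sign flip applied to the angles in the
 * caller, this acts as the transpose of the forward ZYZ rotation.
 */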
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
printf("voxel_backprojection:voxel_backprojection:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
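// e.g.: *mem_GPU_global=(size_t)4*1024*1024*1024;   // budget a fixed 4 GiB instead of querying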
}
|
29bfb72a8d3dba866d2ccf64c4e542b14095f6da.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 19660800
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
// index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
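/*
 * Layout sketch (example numbers): with num_blocks_k = 2 and num_threads_per_block_k = 64
 * (2 warps per block), elements_per_block = GLOBAL_MEM_ELEMENTS/2 and elements_per_warp =
 * elements_per_block/2. Within each warp's slice, element i is made to point 48 elements
 * (48*8 = 384 bytes) ahead, wrapping at the end of the slice:
 *   array[index+i] --> &array[index + (i+48) % elements_per_warp]
 * so shared_latency() below chases a fixed-stride pointer chain that never leaves the slice
 * owned by its warp. Note that the "stride" argument is not actually used by this kernel.
 */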
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
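// Example invocation (binary name and values are only illustrative):
//   ./mem_chase 60 64 10 32 1
// i.e. 60 blocks, 64 threads per block, 10 iterations, 32 active threads per warp, stride 1.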
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/dram/fadd_dram_60_40_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 29bfb72a8d3dba866d2ccf64c4e542b14095f6da.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 19660800
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
// index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaDeviceSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/dram/fadd_dram_60_40_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
fea2710ffd5caab47482ccc4fcac7c8fd6300fb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Example Matlab cuda kernel interface.
*/
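// pointSource: closed-form steady-state solution of the Pennes bioheat equation,
//   -kCond * lap(u) + wPerf*cblood*(u - ua) = q(r),  q(r) ~ Power*mueff^2*exp(-mueff*r)/(4*pi*r),
// evaluated at radius rVar on the shell r1 <= rVar <= r2 (u0/ua: applicator and arterial
// temperatures). This reading is inferred from the kernel name and the parameter names.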
__device__
void pointSource(double rVar, double r1, double r2, double wPerf, double cblood, double kCond, double mueff, double u0, double ua, double Power, double *temperature )
{
double pi = 3.141592653589793;
//*temperature =ua-(exp(-mueff*rVar)*powf(mueff,2)*Power)/(4*kCond*powf(mueff,2)*pi*rVar-4*cblood*pi*rVar*wPerf)+(exp(-mueff*(r1+rVar)+(r1+r2-rVar)*sqrt((cblood*wPerf)/kCond))*(exp(r1*(mueff+sqrt((cblood*wPerf)/kCond)))*powf(mueff,2)*Power*powf(r2,2)*(1+mueff*rVar);
*temperature=ua-(exp(-mueff*rVar)*powf(mueff,2)*Power)/(4*kCond*powf(mueff,2)*pi*rVar-4*cblood*pi*rVar*wPerf)+(exp(-mueff*(r1+rVar)+(r1+r2-rVar)*sqrt((cblood*wPerf)/kCond))*(exp(r1*(mueff+sqrt((cblood*wPerf)/kCond)))*powf(mueff,2)*Power*powf(r2,2)*(1+mueff*rVar)-exp(mueff*rVar+r2*sqrt((cblood*wPerf)/kCond))*powf(mueff,2)*Power*powf(rVar,2)*(-1+r2*sqrt((cblood*wPerf)/kCond))+4*exp(mueff*(r1+rVar)+r2*sqrt((cblood*wPerf)/kCond))*pi*r1*powf(rVar,2)*(u0-ua)*(-kCond*powf(mueff,2)+cblood*wPerf)*(-1+r2*sqrt((cblood*wPerf)/kCond))))/(4*pi*powf(rVar,3)*(-kCond*powf(mueff,2)+cblood*wPerf)*(exp(2*r2*sqrt((cblood*wPerf)/kCond))*(-1+r2*sqrt((cblood*wPerf)/kCond))+exp(2*r1*sqrt((cblood*wPerf)/kCond))*(1+r2*sqrt((cblood*wPerf)/kCond))))+(exp(-mueff*(r1+rVar)+(2*r1+rVar)*sqrt((cblood*wPerf)/kCond))*(-exp(mueff*r1+r2*sqrt((cblood*wPerf)/kCond))*powf(mueff,2)*Power*powf(r2,2)*(1+mueff*rVar)-exp(mueff*rVar+r1*sqrt((cblood*wPerf)/kCond))*powf(mueff,2)*Power*powf(rVar,2)*(1+r2*sqrt((cblood*wPerf)/kCond))-4*exp(mueff*(r1+rVar)+r1*sqrt((cblood*wPerf)/kCond))*pi*r1*powf(rVar,2)*(u0-ua)*(kCond*powf(mueff,2)-cblood*wPerf)*(1+r2*sqrt((cblood*wPerf)/kCond))))/(4*pi*powf(r1,2)*powf(rVar,3)*(-kCond*powf(mueff,2)+cblood*wPerf)*(exp(2*r2*sqrt((cblood*wPerf)/kCond))*(-1+r2*sqrt((cblood*wPerf)/kCond))+exp(2*r1*sqrt((cblood*wPerf)/kCond))*(1+r2*sqrt((cblood*wPerf)/kCond))));
// *temperature = ua-(exp(-mueff*rVar)*mueff*mueff*Power)/(4*kCond*mueff*mueff*pi*rVar-4*cblood*pi*rVar*wPerf)+(exp(-mueff*(r1+rVar)+(r1+r2-rVar)*sqrt((cblood*wPerf)/kCond))*(exp(r1*(mueff+sqrt((cblood*wPerf)/kCond)))*mueff*mueff*Power*r2*r2*(1+mueff*rVar)-exp(mueff*rVar+r2*sqrt((cblood*wPerf)/kCond))*mueff*mueff*Power*rVar*rVar*(-1+r2*sqrt((cblood*wPerf)/kCond))+4*exp(mueff*(r1+rVar)+r2*sqrt((cblood*wPerf)/kCond))*pi*r1*rVar*rVar*(u0-ua)*(-kCond*mueff*mueff+cblood*wPerf)*(-1+r2*sqrt((cblood*wPerf)/kCond))))/(4*pi*rVar*rVar*rVar*(-kCond*mueff*mueff+cblood*wPerf)*(exp(2*r2*sqrt((cblood*wPerf)/kCond))*(-1+r2*sqrt((cblood*wPerf)/kCond))+exp(2*r1*sqrt((cblood*wPerf)/kCond))*(1+r2*sqrt((cblood*wPerf)/kCond))))+(exp(-mueff*(r1+rVar)+(2*r1+rVar)*sqrt((cblood*wPerf)/kCond))*(-exp(mueff*r1+r2*sqrt((cblood*wPerf)/kCond))*mueff*mueff*Power*r2*r2*(1+mueff*rVar)-exp(mueff*rVar+r1*sqrt((cblood*wPerf)/kCond))*mueff*mueff*Power*rVar*rVar*(1+r2*sqrt((cblood*wPerf)/kCond))-4*exp(mueff*(r1+rVar)+r1*sqrt((cblood*wPerf)/kCond))*pi*r1*rVar*rVar*(u0-ua)*(kCond*mueff*mueff-cblood*wPerf)*(1+r2*sqrt((cblood*wPerf)/kCond))))/(4*pi*r1*r1*rVar*rVar*rVar*(-kCond*mueff*mueff+cblood*wPerf)*(exp(2*r2*sqrt((cblood*wPerf)/kCond))*(-1+r2*sqrt((cblood*wPerf)/kCond))+exp(2*r1*sqrt((cblood*wPerf)/kCond))*(1+r2*sqrt((cblood*wPerf)/kCond))));
// *temperature = ua+(P*PI_Var*(mueff*mueff)*exp(-mueff*r)*(1.0/4.0))/(r*(w-k*(mueff*mueff)))-(exp(-R1*mueff-R2*mueff)*exp(r*sqrt(w/k))*(P*PI_Var*(mueff*mueff)*exp(R1*sqrt(w/k))*exp(R2*mueff)-P*PI_Var*(mueff*mueff)*exp(R2*sqrt(w/k))*exp(R1*mueff)-P*PI_Var*R2*(mueff*mueff*mueff)*exp(R2*sqrt(w/k))*exp(R1*mueff)-R1*u0*w*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+R1*ua*w*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+R1*k*(mueff*mueff)*u0*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0-R1*k*(mueff*mueff)*ua*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+P*PI_Var*R2*(mueff*mueff)*exp(R1*sqrt(w/k))*exp(R2*mueff)*sqrt(w/k)-R1*R2*u0*w*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0+R1*R2*ua*w*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0+R1*R2*k*(mueff*mueff)*u0*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0-R1*R2*k*(mueff*mueff)*ua*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0)*(1.0/4.0))/(r*(w-k*(mueff*mueff))*(exp(R1*sqrt(w/k)*2.0)-exp(R2*sqrt(w/k)*2.0)+R2*exp(R1*sqrt(w/k)*2.0)*sqrt(w/k)+R2*exp(R2*sqrt(w/k)*2.0)*sqrt(w/k)))-(exp(R1*sqrt(w/k))*exp(R2*sqrt(w/k))*exp(-r*sqrt(w/k))*exp(-R1*mueff)*exp(-R2*mueff)*(P*PI_Var*(mueff*mueff)*exp(R1*sqrt(w/k))*exp(R1*mueff)-P*PI_Var*(mueff*mueff)*exp(R2*sqrt(w/k))*exp(R2*mueff)+P*PI_Var*R2*(mueff*mueff*mueff)*exp(R1*sqrt(w/k))*exp(R1*mueff)+R1*u0*w*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0-R1*ua*w*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0-R1*k*(mueff*mueff)*u0*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+R1*k*(mueff*mueff)*ua*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+P*PI_Var*R2*(mueff*mueff)*exp(R2*sqrt(w/k))*exp(R2*mueff)*sqrt(w/k)-R1*R2*u0*w*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0+R1*R2*ua*w*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0+R1*R2*k*(mueff*mueff)*u0*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0-R1*R2*k*(mueff*mueff)*ua*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0)*(1.0/4.0))/(r*(w-k*(mueff*mueff))*(exp(R1*sqrt(w/k)*2.0)-exp(R2*sqrt(w/k)*2.0)+R2*exp(R1*sqrt(w/k)*2.0)*sqrt(w/k)+R2*exp(R2*sqrt(w/k)*2.0)*sqrt(w/k)));
}
__device__
void DebugWrite(int idx,int idmat,double rad,double omega, double conduction, double mueff,double temp)
{
printf("%d %d %12.5e %12.5e %12.5e %12.5e %12.5e\n",idx,idmat,rad,omega,conduction,mueff,temp);
//int j,k;
//for (j=0;j<n;j++) {
// for (k=0;k<n+1;k++) {
// printf("%d %d %12.5e %12.5e ",k,j,a[k][j].real(),a[k][j].imag());
// }
// printf(" | %d %12.5e %12.5e \n",j,x[j].real(),x[j].imag());
//}
//printf("\n");
}
/*
* Device code
*/
__global__
void steadyStatePennesLaser(
int const NTissue,
const int* MaterialID,
const double* Perfusion,
const double* ThermalConduction,
const double* EffectiveAttenuation,
double const innerRadius,
double const outerRadius,
int const NSource,
double const Power,
const double* SourceXloc,
const double* SourceYloc,
const double* SourceZloc,
double const InitialTemperature,
double const ArterialTemperature,
double const SpecificHeatBlood,
double const SpacingX,
double const SpacingY,
double const SpacingZ,
int const NpixelX,
int const NpixelY,
int const NpixelZ,
double* d_TemperatureArray)
{
// double SpacingX=0.00078;
/*
grid stride loop design pattern, 1-d grid
http://devblogs.nvidia.com/parallelforall/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
- By using a loop, you can support any problem size even if it exceeds the largest grid size your CUDA device supports. Moreover, you can limit the number of blocks you use to tune performance.
*/
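// A hypothetical host-side launch for this grid-stride kernel (block/grid sizes are
// illustrative only, not taken from this project) would bound the grid instead of
// using one thread per voxel, e.g.:
//   int threads = 256;
//   int blocks  = min((NpixelX * NpixelY * NpixelZ + threads - 1) / threads, 1024);
//   hipLaunchKernelGGL(steadyStatePennesLaser, dim3(blocks), dim3(threads), 0, 0, /* arguments as in the signature above */);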
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < NpixelX * NpixelY * NpixelZ;
idx += blockDim.x * gridDim.x)
{
// compute indices
int index = idx; // use dummy variable
int kkk = index/(NpixelX*NpixelY);
index -= kkk*NpixelX*NpixelY;
int jjj = index/NpixelX;
index -= jjj*NpixelX;
int iii = index/1;
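// the flat index decomposes as idx = iii + NpixelX*(jjj + NpixelY*kkk),
// i.e. iii/jjj/kkk are the x/y/z voxel indices with x varying fastest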
/* get material parameters */
int const idmaterial = MaterialID[idx];
double omega = Perfusion[idmaterial];
double conduction = ThermalConduction[idmaterial];
double mueff = EffectiveAttenuation[idmaterial];
// printf("%d",mueff);
// linear superposition of temperature sources
double temperature = 0.0;
for (int lll=0;lll<NSource;lll++)
{
// double radiusSQ = (iii * SpacingX + 0.13281 - SourceXloc[lll])*(iii * SpacingX + 0.13281 - SourceXloc[lll])
// + (jjj * SpacingY + 0.10547 - SourceYloc[lll])*(jjj * SpacingY + 0.10547 - SourceYloc[lll])
// + (kkk * SpacingZ + 0.06000 - SourceZloc[lll])*(kkk * SpacingZ + 0.06000- SourceZloc[lll]);
double radiusSQ=powf(iii*SpacingX-SourceXloc[lll],2)
+powf(jjj*SpacingY-SourceYloc[lll],2)
+powf(kkk*SpacingZ-SourceZloc[lll],2);//SourceXloc[0]*SourceXloc[0];
double radius = sqrt(radiusSQ);
// call GF code
double sourcetemperature;
pointSource(radius, innerRadius, outerRadius, omega , SpecificHeatBlood, conduction , mueff, InitialTemperature, ArterialTemperature, Power , &sourcetemperature);
if (radius <= innerRadius && NSource ==1)
{
sourcetemperature = InitialTemperature;
}
else if (radius <= innerRadius && NSource == 10)
{
sourcetemperature = InitialTemperature+55;
}
else if (radius <= innerRadius && NSource > 1)
{
sourcetemperature = InitialTemperature;
}
// DebugWrite(idx,idmaterial,radius,omega,conduction,mueff,sourcetemperature);
// superposition
if (idmaterial==0)
{
temperature=0;
}
else
{
temperature = temperature + sourcetemperature/((double)NSource);
}
}
// store temperature in array
d_TemperatureArray[idx] = temperature;
}
}
| fea2710ffd5caab47482ccc4fcac7c8fd6300fb0.cu | /*
* Example Matlab cuda kernel interface.
*/
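// pointSource: closed-form steady-state solution of the Pennes bioheat equation,
//   -kCond * lap(u) + wPerf*cblood*(u - ua) = q(r),  q(r) ~ Power*mueff^2*exp(-mueff*r)/(4*pi*r),
// evaluated at radius rVar on the shell r1 <= rVar <= r2 (u0/ua: applicator and arterial
// temperatures). This reading is inferred from the kernel name and the parameter names.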
__device__
void pointSource(double rVar, double r1, double r2, double wPerf, double cblood, double kCond, double mueff, double u0, double ua, double Power, double *temperature )
{
double pi = 3.141592653589793;
//*temperature =ua-(exp(-mueff*rVar)*powf(mueff,2)*Power)/(4*kCond*powf(mueff,2)*pi*rVar-4*cblood*pi*rVar*wPerf)+(exp(-mueff*(r1+rVar)+(r1+r2-rVar)*sqrt((cblood*wPerf)/kCond))*(exp(r1*(mueff+sqrt((cblood*wPerf)/kCond)))*powf(mueff,2)*Power*powf(r2,2)*(1+mueff*rVar);
*temperature=ua-(exp(-mueff*rVar)*powf(mueff,2)*Power)/(4*kCond*powf(mueff,2)*pi*rVar-4*cblood*pi*rVar*wPerf)+(exp(-mueff*(r1+rVar)+(r1+r2-rVar)*sqrt((cblood*wPerf)/kCond))*(exp(r1*(mueff+sqrt((cblood*wPerf)/kCond)))*powf(mueff,2)*Power*powf(r2,2)*(1+mueff*rVar)-exp(mueff*rVar+r2*sqrt((cblood*wPerf)/kCond))*powf(mueff,2)*Power*powf(rVar,2)*(-1+r2*sqrt((cblood*wPerf)/kCond))+4*exp(mueff*(r1+rVar)+r2*sqrt((cblood*wPerf)/kCond))*pi*r1*powf(rVar,2)*(u0-ua)*(-kCond*powf(mueff,2)+cblood*wPerf)*(-1+r2*sqrt((cblood*wPerf)/kCond))))/(4*pi*powf(rVar,3)*(-kCond*powf(mueff,2)+cblood*wPerf)*(exp(2*r2*sqrt((cblood*wPerf)/kCond))*(-1+r2*sqrt((cblood*wPerf)/kCond))+exp(2*r1*sqrt((cblood*wPerf)/kCond))*(1+r2*sqrt((cblood*wPerf)/kCond))))+(exp(-mueff*(r1+rVar)+(2*r1+rVar)*sqrt((cblood*wPerf)/kCond))*(-exp(mueff*r1+r2*sqrt((cblood*wPerf)/kCond))*powf(mueff,2)*Power*powf(r2,2)*(1+mueff*rVar)-exp(mueff*rVar+r1*sqrt((cblood*wPerf)/kCond))*powf(mueff,2)*Power*powf(rVar,2)*(1+r2*sqrt((cblood*wPerf)/kCond))-4*exp(mueff*(r1+rVar)+r1*sqrt((cblood*wPerf)/kCond))*pi*r1*powf(rVar,2)*(u0-ua)*(kCond*powf(mueff,2)-cblood*wPerf)*(1+r2*sqrt((cblood*wPerf)/kCond))))/(4*pi*powf(r1,2)*powf(rVar,3)*(-kCond*powf(mueff,2)+cblood*wPerf)*(exp(2*r2*sqrt((cblood*wPerf)/kCond))*(-1+r2*sqrt((cblood*wPerf)/kCond))+exp(2*r1*sqrt((cblood*wPerf)/kCond))*(1+r2*sqrt((cblood*wPerf)/kCond))));
// *temperature = ua-(exp(-mueff*rVar)*mueff*mueff*Power)/(4*kCond*mueff*mueff*pi*rVar-4*cblood*pi*rVar*wPerf)+(exp(-mueff*(r1+rVar)+(r1+r2-rVar)*sqrt((cblood*wPerf)/kCond))*(exp(r1*(mueff+sqrt((cblood*wPerf)/kCond)))*mueff*mueff*Power*r2*r2*(1+mueff*rVar)-exp(mueff*rVar+r2*sqrt((cblood*wPerf)/kCond))*mueff*mueff*Power*rVar*rVar*(-1+r2*sqrt((cblood*wPerf)/kCond))+4*exp(mueff*(r1+rVar)+r2*sqrt((cblood*wPerf)/kCond))*pi*r1*rVar*rVar*(u0-ua)*(-kCond*mueff*mueff+cblood*wPerf)*(-1+r2*sqrt((cblood*wPerf)/kCond))))/(4*pi*rVar*rVar*rVar*(-kCond*mueff*mueff+cblood*wPerf)*(exp(2*r2*sqrt((cblood*wPerf)/kCond))*(-1+r2*sqrt((cblood*wPerf)/kCond))+exp(2*r1*sqrt((cblood*wPerf)/kCond))*(1+r2*sqrt((cblood*wPerf)/kCond))))+(exp(-mueff*(r1+rVar)+(2*r1+rVar)*sqrt((cblood*wPerf)/kCond))*(-exp(mueff*r1+r2*sqrt((cblood*wPerf)/kCond))*mueff*mueff*Power*r2*r2*(1+mueff*rVar)-exp(mueff*rVar+r1*sqrt((cblood*wPerf)/kCond))*mueff*mueff*Power*rVar*rVar*(1+r2*sqrt((cblood*wPerf)/kCond))-4*exp(mueff*(r1+rVar)+r1*sqrt((cblood*wPerf)/kCond))*pi*r1*rVar*rVar*(u0-ua)*(kCond*mueff*mueff-cblood*wPerf)*(1+r2*sqrt((cblood*wPerf)/kCond))))/(4*pi*r1*r1*rVar*rVar*rVar*(-kCond*mueff*mueff+cblood*wPerf)*(exp(2*r2*sqrt((cblood*wPerf)/kCond))*(-1+r2*sqrt((cblood*wPerf)/kCond))+exp(2*r1*sqrt((cblood*wPerf)/kCond))*(1+r2*sqrt((cblood*wPerf)/kCond))));
// *temperature = ua+(P*PI_Var*(mueff*mueff)*exp(-mueff*r)*(1.0/4.0))/(r*(w-k*(mueff*mueff)))-(exp(-R1*mueff-R2*mueff)*exp(r*sqrt(w/k))*(P*PI_Var*(mueff*mueff)*exp(R1*sqrt(w/k))*exp(R2*mueff)-P*PI_Var*(mueff*mueff)*exp(R2*sqrt(w/k))*exp(R1*mueff)-P*PI_Var*R2*(mueff*mueff*mueff)*exp(R2*sqrt(w/k))*exp(R1*mueff)-R1*u0*w*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+R1*ua*w*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+R1*k*(mueff*mueff)*u0*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0-R1*k*(mueff*mueff)*ua*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+P*PI_Var*R2*(mueff*mueff)*exp(R1*sqrt(w/k))*exp(R2*mueff)*sqrt(w/k)-R1*R2*u0*w*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0+R1*R2*ua*w*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0+R1*R2*k*(mueff*mueff)*u0*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0-R1*R2*k*(mueff*mueff)*ua*exp(R1*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0)*(1.0/4.0))/(r*(w-k*(mueff*mueff))*(exp(R1*sqrt(w/k)*2.0)-exp(R2*sqrt(w/k)*2.0)+R2*exp(R1*sqrt(w/k)*2.0)*sqrt(w/k)+R2*exp(R2*sqrt(w/k)*2.0)*sqrt(w/k)))-(exp(R1*sqrt(w/k))*exp(R2*sqrt(w/k))*exp(-r*sqrt(w/k))*exp(-R1*mueff)*exp(-R2*mueff)*(P*PI_Var*(mueff*mueff)*exp(R1*sqrt(w/k))*exp(R1*mueff)-P*PI_Var*(mueff*mueff)*exp(R2*sqrt(w/k))*exp(R2*mueff)+P*PI_Var*R2*(mueff*mueff*mueff)*exp(R1*sqrt(w/k))*exp(R1*mueff)+R1*u0*w*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0-R1*ua*w*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0-R1*k*(mueff*mueff)*u0*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+R1*k*(mueff*mueff)*ua*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*4.0+P*PI_Var*R2*(mueff*mueff)*exp(R2*sqrt(w/k))*exp(R2*mueff)*sqrt(w/k)-R1*R2*u0*w*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0+R1*R2*ua*w*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0+R1*R2*k*(mueff*mueff)*u0*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0-R1*R2*k*(mueff*mueff)*ua*exp(R2*sqrt(w/k))*exp(R1*mueff)*exp(R2*mueff)*sqrt(w/k)*4.0)*(1.0/4.0))/(r*(w-k*(mueff*mueff))*(exp(R1*sqrt(w/k)*2.0)-exp(R2*sqrt(w/k)*2.0)+R2*exp(R1*sqrt(w/k)*2.0)*sqrt(w/k)+R2*exp(R2*sqrt(w/k)*2.0)*sqrt(w/k)));
}
__device__
void DebugWrite(int idx,int idmat,double rad,double omega, double conduction, double mueff,double temp)
{
printf("%d %d %12.5e %12.5e %12.5e %12.5e %12.5e\n",idx,idmat,rad,omega,conduction,mueff,temp);
//int j,k;
//for (j=0;j<n;j++) {
// for (k=0;k<n+1;k++) {
// printf("%d %d %12.5e %12.5e ",k,j,a[k][j].real(),a[k][j].imag());
// }
// printf(" | %d %12.5e %12.5e \n",j,x[j].real(),x[j].imag());
//}
//printf("\n");
}
/*
* Device code
*/
__global__
void steadyStatePennesLaser(
int const NTissue,
const int* MaterialID,
const double* Perfusion,
const double* ThermalConduction,
const double* EffectiveAttenuation,
double const innerRadius,
double const outerRadius,
int const NSource,
double const Power,
const double* SourceXloc,
const double* SourceYloc,
const double* SourceZloc,
double const InitialTemperature,
double const ArterialTemperature,
double const SpecificHeatBlood,
double const SpacingX,
double const SpacingY,
double const SpacingZ,
int const NpixelX,
int const NpixelY,
int const NpixelZ,
double* d_TemperatureArray)
{
// double SpacingX=0.00078;
/*
grid stride loop design pattern, 1-d grid
http://devblogs.nvidia.com/parallelforall/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
- By using a loop, you can support any problem size even if it exceeds the largest grid size your CUDA device supports. Moreover, you can limit the number of blocks you use to tune performance.
*/
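// A hypothetical host-side launch for this grid-stride kernel (block/grid sizes are
// illustrative only, not taken from this project) would bound the grid instead of
// using one thread per voxel, e.g.:
//   int threads = 256;
//   int blocks  = min((NpixelX * NpixelY * NpixelZ + threads - 1) / threads, 1024);
//   steadyStatePennesLaser<<<blocks, threads>>>(/* arguments as in the signature above */);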
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < NpixelX * NpixelY * NpixelZ;
idx += blockDim.x * gridDim.x)
{
// compute indices
int index = idx; // use dummy variable
int kkk = index/(NpixelX*NpixelY);
index -= kkk*NpixelX*NpixelY;
int jjj = index/NpixelX;
index -= jjj*NpixelX;
int iii = index/1;
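// the flat index decomposes as idx = iii + NpixelX*(jjj + NpixelY*kkk),
// i.e. iii/jjj/kkk are the x/y/z voxel indices with x varying fastest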
/* get material parameters */
int const idmaterial = MaterialID[idx];
double omega = Perfusion[idmaterial];
double conduction = ThermalConduction[idmaterial];
double mueff = EffectiveAttenuation[idmaterial];
// printf("%d",mueff);
// linear superposition of temperature sources
double temperature = 0.0;
for (int lll=0;lll<NSource;lll++)
{
// double radiusSQ = (iii * SpacingX + 0.13281 - SourceXloc[lll])*(iii * SpacingX + 0.13281 - SourceXloc[lll])
// + (jjj * SpacingY + 0.10547 - SourceYloc[lll])*(jjj * SpacingY + 0.10547 - SourceYloc[lll])
// + (kkk * SpacingZ + 0.06000 - SourceZloc[lll])*(kkk * SpacingZ + 0.06000- SourceZloc[lll]);
double radiusSQ=powf(iii*SpacingX-SourceXloc[lll],2)
+powf(jjj*SpacingY-SourceYloc[lll],2)
+powf(kkk*SpacingZ-SourceZloc[lll],2);//SourceXloc[0]*SourceXloc[0];
double radius = sqrt(radiusSQ);
// call GF code
double sourcetemperature;
pointSource(radius, innerRadius, outerRadius, omega , SpecificHeatBlood, conduction , mueff, InitialTemperature, ArterialTemperature, Power , &sourcetemperature);
if (radius <= innerRadius && NSource ==1)
{
sourcetemperature = InitialTemperature;
}
else if (radius <= innerRadius && NSource == 10)
{
sourcetemperature = InitialTemperature+55;
}
else if (radius <= innerRadius && NSource > 1)
{
sourcetemperature = InitialTemperature;
}
// DebugWrite(idx,idmaterial,radius,omega,conduction,mueff,sourcetemperature);
// superposition
if (idmaterial==0)
{
temperature=0;
}
else
{
temperature = temperature + sourcetemperature/((double)NSource);
}
}
// store temperature in array
d_TemperatureArray[idx] = temperature;
}
}
|
c33bf5cf7f6acb4eae28531f69d1920e65ea2a9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __CUDACC_RTC__
#include "../Include/objects_list.h"
#include "../Include/cuda_defines.cuh"
#else
#define CREATE_OBJECT_TYPE_DESCRIPTION(__TYPE__,__STRUCT__) \
class __TYPE__ { \
protected: \
public: \
typedef __STRUCT__ data_struct; \
};
#include "objects_list.h"
#include "hip_defines.cuh"
#endif
#define _ATOMIC __device__ __forceinline__
#define _KERNEL extern "C" __global__ void
#define _PTR *__restrict__
namespace atomic {
_ATOMIC point new_point( const scalar &x, const scalar &y, const scalar &z ) {
return point{ x, y, z };
}
_ATOMIC point mul_point( const point &p, const scalar &s ) {
return point{ s * p.x, s * p.y, s * p.z };
}
_ATOMIC point add_point( const point &p1, const point &p2 ) {
return point{ p1.x + p2.x, p1.y + p2.y, p1.z + p2.z };
}
_ATOMIC scalar dot( const point &p1, const point &p2 ) {
return p1.x * p2.x + p1.y * p2.y + p1.z * p2.z;
}
_ATOMIC scalar mix( const scalar &a, const scalar &b, const scalar &x ) {
return a + x * ( b - a );
}
};
namespace primitives {
// TYPE_LIST
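// The primitive names are Esperanto: sfero = sphere, kubo = cube/box, cilindro = cylinder,
// ebeno = plane, kunigajo = union, komunajo = intersection, komplemento = complement,
// glata = smooth(ed), movo = translation, rotacio = rotation, senfina ripeto = infinite
// repetition, portanta sfero = bounding ("carrying") sphere; bazo = base object.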
CREATE_OBJECT_TYPE_DEFINITION(
portanta_sfero,
{
point P;
P.x = p.x - data->t.x;
P.y = p.y - data->t.y;
P.z = p.z - data->t.z;
scalar d = length_3( P.x, P.y, P.z ) - data->r;
primitives::bazo_ptr o = obj + data->o;
if ( d <= RAYS_MIN_DIST ) return RAYS_DIST( o, P );
else return d;
},
{
point P;
P.x = p.x - data->t.x;
P.y = p.y - data->t.y;
P.z = p.z - data->t.z;
primitives::bazo_ptr o = obj + data->o;
return RAYS_NORM( o, P );
} );
CREATE_OBJECT_TYPE_DEFINITION(
sfero,
{
return length_3( p.x, p.y, p.z ) - data->r;
},
{
return p;
} );
CREATE_OBJECT_TYPE_DEFINITION(
kubo,
{
point q;
q.x = fabsf( p.x ) - data->b.x;
q.y = fabsf( p.y ) - data->b.y;
q.z = fabsf( p.z ) - data->b.z;
if ( q.x < 0.f && q.y < 0.f && q.z < 0.f )
return max( q.x, max( q.y, q.z ) );
else
return length_3( max( q.x, 0.f ), max( q.y, 0.f ), max( q.z, 0.f ) );
},
{
point q;
q.x = fabsf( p.x ) - data->b.x;
q.y = fabsf( p.y ) - data->b.y;
q.z = fabsf( p.z ) - data->b.z;
if ( q.x < 0.f && q.y < 0.f && q.z < 0.f )
return q.x > q.z ? ( q.x > q.y ? atomic::new_point( p.x > 0.f ? 1.f : -1.f, 0.f, 0.f ) : atomic::new_point(
0.f, p.y > 0.f ? 1.f : -1.f, 0.f ) ) : ( q.y > q.z ? atomic::new_point( 0.f, p.y > 0.f ? 1.f : -1.f,
0.f ) : atomic::new_point(
0.f, 0.f, p.z > 0.f ? 1.f : -1.f ) );
else
return atomic::new_point( q.x > 0.f ? p.x > 0.f ? 1.f : -1.f : 0.f, q.y > 0.f ? p.y > 0.f ? 1.f : -1.f : 0.f, q.z > 0.f ? p.z > 0.f ? 1.f : -1.f : 0.f );
} );
CREATE_OBJECT_TYPE_DEFINITION(
cilindro,
{
scalar r = length_2( p.x, p.y );
float2 q;
q.x = r - data->r;
q.y = fabsf( p.z ) - data->h;
if ( q.x < 0.f && q.y < 0.f )
return q.x > q.y ? q.x : q.y;
else
return length_2( max( q.x, 0.f ), max( q.y, 0.f ) );
},
{
scalar r = length_2( p.x, p.y );
float2 q;
q.x = r - data->r;
q.y = fabsf( p.z ) - data->h;
return q.x > q.y ? atomic::new_point( p.x, p.y, 0.f ) : atomic::new_point( 0.f, 0.f,
p.z > 0.f ? 1.f : -1.f );
} );
CREATE_OBJECT_TYPE_DEFINITION(
ebeno,
{
return atomic::dot( data->n, p );
},
{
return data->n;
} );
CREATE_OBJECT_TYPE_DEFINITION(
kunigajo_2,
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
return min( d0, d1 );
},
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
if ( d0 < d1 ) return RAYS_NORM( o0, p );
else return RAYS_NORM( o1, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
kunigajo_3,
{
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = min( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 2 ];
d = min( d, RAYS_DIST( o, p ) );
return d;
},
{
counter i_min = 0;
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d;
scalar d_min = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) {
d_min = d;
i_min = 1;
}
o = obj + data->o[ 2 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) { i_min = 2; }
o = obj + data->o[ i_min ];
return RAYS_NORM( o, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
kunigajo_4,
{
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = min( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 2 ];
d = min( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 3 ];
d = min( d, RAYS_DIST( o, p ) );
return d;
},
{
counter i_min = 0;
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d;
scalar d_min = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) {
d_min = d;
i_min = 1;
}
o = obj + data->o[ 2 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) {
d_min = d;
i_min = 2;
}
o = obj + data->o[ 3 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) { i_min = 3; }
o = obj + data->o[ i_min ];
return RAYS_NORM( o, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
komunajo_2,
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
return max( d0, d1 );
},
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
if ( d0 > d1 ) return RAYS_NORM( o0, p );
else return RAYS_NORM( o1, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
komunajo_3,
{
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = max( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 2 ];
d = max( d, RAYS_DIST( o, p ) );
return d;
},
{
counter i_max = 0;
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d;
scalar d_max = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) {
d_max = d;
i_max = 1;
}
o = obj + data->o[ 2 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) { i_max = 2; }
o = obj + data->o[ i_max ];
return RAYS_NORM( o, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
komunajo_4,
{
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = max( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 2 ];
d = max( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 3 ];
d = max( d, RAYS_DIST( o, p ) );
return d;
},
{
counter i_max = 0;
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d;
scalar d_max = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) {
d_max = d;
i_max = 1;
}
o = obj + data->o[ 2 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) {
d_max = d;
i_max = 2;
}
o = obj + data->o[ 3 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) { i_max = 3; }
o = obj + data->o[ i_max ];
return RAYS_NORM( o, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
komplemento,
{
primitives::bazo_ptr O = obj + data->o;
scalar D = RAYS_DIST( O, p );
return -D;
},
{
primitives::bazo_ptr O = obj + data->o;
point N = RAYS_NORM( O, p );
return atomic::new_point( -N.x, -N.y, -N.z );
} );
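// glata_kunigajo_2 / glata_komunajo_2 below are the smooth union / smooth intersection:
// the two child distances are blended with the polynomial smooth-min
// mix(d0, d1, h) -/+ k*h*(1-h), where k sets the blend radius; the child normals are
// (re)normalised and lerped with the same weight h.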
CREATE_OBJECT_TYPE_DEFINITION(
glata_kunigajo_2,
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
scalar h = ( 1.f - ( d0 - d1 ) / data->k ) * .5f;
if ( h > 1.f ) return d0;
if ( h < 0.f ) return d1;
return atomic::mix( d0, d1, h ) - data->k * h * ( 1.f - h );
},
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
scalar h = ( 1.f - ( d0 - d1 ) / data->k ) * .5f;
if ( h > 1.f ) return RAYS_NORM( o0, p );
if ( h < 0.f ) return RAYS_NORM( o1, p );
point n0 = RAYS_NORM( o0, p );
point n1 = RAYS_NORM( o1, p );
d0 = r_length_3( n0.x, n0.y, n0.z );
d1 = r_length_3( n1.x, n1.y, n1.z );
return atomic::new_point( atomic::mix( d0 * n0.x, d1 * n1.x, h ), atomic::mix( d0 * n0.y, d1 * n1.y, h ),
atomic::mix( d0 * n0.z, d1 * n1.z, h ) );
} );
CREATE_OBJECT_TYPE_DEFINITION(
glata_komunajo_2,
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
scalar h = ( 1.f + ( d0 - d1 ) / data->k ) * .5f;
if ( h > 1.f ) return d0;
if ( h < 0.f ) return d1;
return atomic::mix( d0, d1, h ) + data->k * h * ( 1.f - h );
},
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
scalar h = ( 1.f + ( d0 - d1 ) / data->k ) * .5f;
if ( h > 1.f ) return RAYS_NORM( o0, p );
if ( h < 0.f ) return RAYS_NORM( o1, p );
point n0 = RAYS_NORM( o0, p );
point n1 = RAYS_NORM( o1, p );
d0 = r_length_3( n0.x, n0.y, n0.z );
d1 = r_length_3( n1.x, n1.y, n1.z );
return atomic::new_point( atomic::mix( d0 * n0.x, d1 * n1.x, h ), atomic::mix( d0 * n0.y, d1 * n1.y, h ),
atomic::mix( d0 * n0.z, d1 * n1.z, h ) );
} );
CREATE_OBJECT_TYPE_DEFINITION(
movo,
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.x = p.x - data->t.x;
P.y = p.y - data->t.y;
P.z = p.z - data->t.z;
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.x = p.x - data->t.x;
P.y = p.y - data->t.y;
P.z = p.z - data->t.z;
return RAYS_NORM( O, P );
} );
CREATE_OBJECT_TYPE_DEFINITION(
rotacioX,
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.y = data->cos_phi * p.y + data->sin_phi * p.z;
P.z = -data->sin_phi * p.y + data->cos_phi * p.z;
P.x = p.x;
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
point P;
point _P;
P.y = data->cos_phi * p.y + data->sin_phi * p.z;
P.z = -data->sin_phi * p.y + data->cos_phi * p.z;
P.x = p.x;
_P = RAYS_NORM( O, P );
P.y = data->cos_phi * _P.y - data->sin_phi * _P.z;
P.z = data->sin_phi * _P.y + data->cos_phi * _P.z;
P.x = _P.x;
return P;
} );
CREATE_OBJECT_TYPE_DEFINITION(
rotacioY,
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.z = data->cos_phi * p.z + data->sin_phi * p.x;
P.x = -data->sin_phi * p.z + data->cos_phi * p.x;
P.y = p.y;
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
point P;
point _P;
P.z = data->cos_phi * p.z + data->sin_phi * p.x;
P.x = -data->sin_phi * p.z + data->cos_phi * p.x;
P.y = p.y;
_P = RAYS_NORM( O, P );
P.z = data->cos_phi * _P.z - data->sin_phi * _P.x;
P.x = data->sin_phi * _P.z + data->cos_phi * _P.x;
P.y = _P.y;
return P;
} );
CREATE_OBJECT_TYPE_DEFINITION(
rotacioZ,
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.x = data->cos_phi * p.x + data->sin_phi * p.y;
P.y = -data->sin_phi * p.x + data->cos_phi * p.y;
P.z = p.z;
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
point P;
point _P;
P.x = data->cos_phi * p.x + data->sin_phi * p.y;
P.y = -data->sin_phi * p.x + data->cos_phi * p.y;
P.z = p.z;
_P = RAYS_NORM( O, P );
P.x = data->cos_phi * _P.x - data->sin_phi * _P.y;
P.y = data->sin_phi * _P.x + data->cos_phi * _P.y;
P.z = _P.z;
return P;
} );
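// rotacioQ below rotates by the quaternion (q, q_w): it builds R = I + 2*(q*q^T - |q|^2*I + q_w*[q]x),
// applies R to the sample point before the child's distance lookup, and applies R^T to
// carry the child's normal back.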
CREATE_OBJECT_TYPE_DEFINITION(
rotacioQ,
{
primitives::bazo_ptr O = obj + data->o;
matrix Q;
scalar temp;
Q.x.x = data->q.x * data->q.x;
Q.y.y = data->q.y * data->q.y;
Q.z.z = data->q.z * data->q.z;
temp = Q.x.x + Q.y.y + Q.z.z;
Q.x.x -= temp;
Q.y.y -= temp;
Q.z.z -= temp;
Q.x.y = data->q.x * data->q.y;
temp = data->q.z * data->q_w;
Q.y.x = Q.x.y + temp;
Q.x.y -= temp;
Q.y.z = data->q.y * data->q.z;
temp = data->q.x * data->q_w;
Q.z.y = Q.y.z + temp;
Q.y.z -= temp;
Q.z.x = data->q.z * data->q.x;
temp = data->q.y * data->q_w;
Q.x.z = Q.z.x + temp;
Q.z.x -= temp;
point P = p;
P.x += 2.f * ( Q.x.x * p.x + Q.x.y * p.y + Q.x.z * p.z );
P.y += 2.f * ( Q.y.x * p.x + Q.y.y * p.y + Q.y.z * p.z );
P.z += 2.f * ( Q.z.x * p.x + Q.z.y * p.y + Q.z.z * p.z );
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
matrix Q;
scalar temp;
Q.x.x = data->q.x * data->q.x;
Q.y.y = data->q.y * data->q.y;
Q.z.z = data->q.z * data->q.z;
temp = Q.x.x + Q.y.y + Q.z.z;
Q.x.x -= temp;
Q.y.y -= temp;
Q.z.z -= temp;
Q.x.y = data->q.x * data->q.y;
temp = data->q.z * data->q_w;
Q.y.x = Q.x.y + temp;
Q.x.y -= temp;
Q.y.z = data->q.y * data->q.z;
temp = data->q.x * data->q_w;
Q.z.y = Q.y.z + temp;
Q.y.z -= temp;
Q.z.x = data->q.z * data->q.x;
temp = data->q.y * data->q_w;
Q.x.z = Q.z.x + temp;
Q.z.x -= temp;
point P = p;
P.x += 2.f * ( Q.x.x * p.x + Q.x.y * p.y + Q.x.z * p.z );
P.y += 2.f * ( Q.y.x * p.x + Q.y.y * p.y + Q.y.z * p.z );
P.z += 2.f * ( Q.z.x * p.x + Q.z.y * p.y + Q.z.z * p.z );
point N = RAYS_NORM( O, P );
P = N;
P.x += 2.f * ( Q.x.x * N.x + Q.y.x * N.y + Q.z.x * N.z );
P.y += 2.f * ( Q.x.y * N.x + Q.y.y * N.y + Q.z.y * N.z );
P.z += 2.f * ( Q.x.z * N.x + Q.y.z * N.y + Q.z.z * N.z );
return P;
} );
CREATE_OBJECT_TYPE_DEFINITION(
senfina_ripeto,
{
primitives::bazo_ptr o = obj + data->o;
point a = data->a;
scalar N = floorf( atomic::dot( a, p ) / atomic::dot( a, a ) + .5f );
a.x = p.x - N * a.x;
a.y = p.y - N * a.y;
a.z = p.z - N * a.z;
return RAYS_DIST( o, a );
},
{
primitives::bazo_ptr o = obj + data->o;
point a = data->a;
scalar N = floorf( atomic::dot( a, p ) / atomic::dot( a, a ) + .5f );
a.x = p.x - N * a.x;
a.y = p.y - N * a.y;
a.z = p.z - N * a.z;
return RAYS_NORM( o, a );
} );
};
// Kernels definitions
_KERNEL kernel_Process( const size_t *Width, const size_t *Height, const rays_info *Info_d, ray _PTR Rays,
primitives::bazo _PTR Primitives_d, const size_t *PrimitivesNum, hipSurfaceObject_t Image ) {
coord
x = CUDA_RAYS_COORD_nD( x, 2 ),
y = CUDA_RAYS_COORD_nD( y, 2 ),
id = RAYS_PRIMITIVES_PER_THREAD * ( threadIdx.y * RAYS_BLOCK_2D_x + threadIdx.x ),
w = *Width, h = *Height;
// requires: RAYS_BLOCK_2D_x * RAYS_BLOCK_2D_y * RAYS_PRIMITIVES_PER_THREAD >= PrimitivesNum
__shared__ primitives::bazo curr_ptr[ RAYS_BLOCK_2D_x * RAYS_BLOCK_2D_y * RAYS_PRIMITIVES_PER_THREAD ];
if ( id < *PrimitivesNum ) {
primitives::bazo_ptr self = curr_ptr + id;
#pragma unroll
for ( size_t i = 0; i < RAYS_PRIMITIVES_PER_THREAD; ++i, ++self ) {
*self = Primitives_d[ id + i ];
//CREATE_OBJECT_TYPE_PROCESSING_LISTING_2( self );
}
}
__syncthreads();
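// at this point the whole primitive list has been staged cooperatively into shared memory
// (RAYS_PRIMITIVES_PER_THREAD entries per thread); each thread now marches its pixel's ray
// through the distance field of the root object (curr_ptr[0]) held in that shared copy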
if ( x < w && y < h ) {
scalar curr_dist, ray_dist = 0;
ray r = Rays[ y * w + x ];
uchar4 PIXEL = { 0x00, 0x00, 0x00, 0xff };
point curr_norm, light = Info_d->LightSource; //point{ 1.f, 0.f, 0.f };
#pragma unroll
for ( size_t I = 0; I < 500; ++I ) {
curr_dist = RAYS_DIST( curr_ptr, r.p );
if ( curr_dist < RAYS_MIN_DIST ) {
if ( curr_dist < -0.f ) {
curr_norm.x = -r.d.x;
curr_norm.y = -r.d.y;
curr_norm.z = -r.d.z;
} else {
curr_norm = RAYS_NORM( curr_ptr, r.p );
scalar R_1 = r_length_3( curr_norm.x, curr_norm.y, curr_norm.z );
curr_norm = atomic::mul_point( curr_norm, R_1 );
}
if ( atomic::dot( curr_norm, r.d ) < 0.f ) {
float N_L = atomic::dot( curr_norm, light );
float
SHADOW = 1.f,
OCCLUSION = 0.f,
SCA = 1.f;
point p = r.p;
ray_dist = RAYS_MIN_DIST;
#define DELTA 1
#define HARDNESS 128.f
p = r.p;
p.x += ( 1 + DELTA ) * RAYS_MIN_DIST * light.x;
p.y += ( 1 + DELTA ) * RAYS_MIN_DIST * light.y;
p.z += ( 1 + DELTA ) * RAYS_MIN_DIST * light.z;
ray_dist = ( 1 + DELTA ) * RAYS_MIN_DIST;
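// secondary march toward the light: the start point is pushed (1+DELTA)*RAYS_MIN_DIST along
// the light direction to step off the surface, and inside the loop
// SHADOW = min(SHADOW, HARDNESS*curr_dist/ray_dist) is the usual distance-field
// soft-shadow penumbra estimate (HARDNESS controls how sharp the shadow edge is)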
for ( size_t J = 0; J < 300; ++J ) {
curr_dist = RAYS_DIST( curr_ptr, p );
if ( 8 * curr_dist < RAYS_MIN_DIST ) {
if ( curr_dist < -0.f ) {
curr_norm.x = -light.x;
curr_norm.y = -light.y;
curr_norm.z = -light.z;
} else {
curr_norm = RAYS_NORM( curr_ptr, p );
scalar R_1 = r_length_3( curr_norm.x, curr_norm.y, curr_norm.z );
curr_norm = atomic::mul_point( curr_norm, R_1 );
}
if ( atomic::dot( curr_norm, r.d ) < RAYS_MIN_DIST ) {
SHADOW = 0.f;
break;
}
}
SHADOW = min( SHADOW, HARDNESS * curr_dist / ray_dist );
if ( SHADOW < 0.01f )
break;
p.x += curr_dist * light.x;
p.y += curr_dist * light.y;
p.z += curr_dist * light.z;
ray_dist += curr_dist;
// LIGHT
if ( ray_dist >= RAYS_MAX_DIST )
break;
}
float3 MATERIAL = { .5f, .7f, 1.f };
raw_byte LIGHT =
0xff * ( RAYS_MIN_LUM + ( RAYS_MAX_LUM - RAYS_MIN_LUM ) * ( N_L > 0.f ? N_L : 0.f ) * SHADOW );
PIXEL = {
raw_byte( LIGHT * MATERIAL.x ),
raw_byte( LIGHT * MATERIAL.y ),
raw_byte( LIGHT * MATERIAL.z ),
0xff
};
break;
}
}
r.p.x += curr_dist * r.d.x;
r.p.y += curr_dist * r.d.y;
r.p.z += curr_dist * r.d.z;
ray_dist += curr_dist;
if ( ray_dist >= RAYS_MAX_DIST ) {
break;
}
}
surf2Dwrite( PIXEL, Image, x * 4, y );
}
}
_KERNEL kernel_SetPrimitives( primitives::bazo _PTR Primitives, const size_t *PrimitivesNum ) {
coord x = CUDA_RAYS_COORD_nD( x, 1 );
if ( x < *PrimitivesNum ) {
primitives::bazo_ptr self = Primitives + x;
CREATE_OBJECT_TYPE_PROCESSING_LISTING_2( self )
}
}
_KERNEL kernel_SetRays( const size_t *Width, const size_t *Height, rays_info _PTR Info_d, ray _PTR Rays_d ) {
coord
x = CUDA_RAYS_COORD_nD( x, 2 ),
y = CUDA_RAYS_COORD_nD( y, 2 );
__shared__ rays_info Info[ 1 ];
if ( threadIdx.x == 0 && threadIdx.y == 0 )
Info[ 0 ] = *Info_d;
__syncthreads();
if ( x < *Width && y < *Height ) {
scalar
X = .5f * float( 2 * x - coord( *Width ) + 1 ),
Y = .5f * float( 2 * y - coord( *Height ) + 1 ),
Z = Info->Depth;
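// (X, Y) are the pixel's offsets from the screen centre in pixel units
// (0.5*(2*x - Width + 1) = x - (Width-1)/2); Z is the distance to the image plane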
point pos;
pos.x = X * Info->StartWVec.x + Y * Info->StartHVec.x;
pos.y = X * Info->StartWVec.y + Y * Info->StartHVec.y;
pos.z = X * Info->StartWVec.z + Y * Info->StartHVec.z;
point delta_pos;
delta_pos.x = Z * Info->StartDir.x;
delta_pos.y = Z * Info->StartDir.y;
delta_pos.z = Z * Info->StartDir.z;
scalar R_1 = rnorm3df( pos.x + delta_pos.x, pos.y + delta_pos.y, pos.z + delta_pos.z );
ray *self = Rays_d + y * *Width + x;
self->d = atomic::mul_point( atomic::add_point( pos, delta_pos ), R_1 );
self->p = atomic::add_point( pos, Info->StartPos );
}
}
| c33bf5cf7f6acb4eae28531f69d1920e65ea2a9f.cu | #ifndef __CUDACC_RTC__
#include "../Include/objects_list.h"
#include "../Include/cuda_defines.cuh"
#else
#define CREATE_OBJECT_TYPE_DESCRIPTION(__TYPE__,__STRUCT__) \
class __TYPE__ { \
protected: \
public: \
typedef __STRUCT__ data_struct; \
};
#include "objects_list.h"
#include "cuda_defines.cuh"
#endif
#define _ATOMIC __device__ __forceinline__
#define _KERNEL extern "C" __global__ void
#define _PTR *__restrict__
namespace atomic {
_ATOMIC point new_point( const scalar &x, const scalar &y, const scalar &z ) {
return point{ x, y, z };
}
_ATOMIC point mul_point( const point &p, const scalar &s ) {
return point{ s * p.x, s * p.y, s * p.z };
}
_ATOMIC point add_point( const point &p1, const point &p2 ) {
return point{ p1.x + p2.x, p1.y + p2.y, p1.z + p2.z };
}
_ATOMIC scalar dot( const point &p1, const point &p2 ) {
return p1.x * p2.x + p1.y * p2.y + p1.z * p2.z;
}
_ATOMIC scalar mix( const scalar &a, const scalar &b, const scalar &x ) {
return a + x * ( b - a );
}
};
namespace primitives {
// TYPE_LIST
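// The primitive names are Esperanto: sfero = sphere, kubo = cube/box, cilindro = cylinder,
// ebeno = plane, kunigajo = union, komunajo = intersection, komplemento = complement,
// glata = smooth(ed), movo = translation, rotacio = rotation, senfina ripeto = infinite
// repetition, portanta sfero = bounding ("carrying") sphere; bazo = base object.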
CREATE_OBJECT_TYPE_DEFINITION(
portanta_sfero,
{
point P;
P.x = p.x - data->t.x;
P.y = p.y - data->t.y;
P.z = p.z - data->t.z;
scalar d = length_3( P.x, P.y, P.z ) - data->r;
primitives::bazo_ptr o = obj + data->o;
if ( d <= RAYS_MIN_DIST ) return RAYS_DIST( o, P );
else return d;
},
{
point P;
P.x = p.x - data->t.x;
P.y = p.y - data->t.y;
P.z = p.z - data->t.z;
primitives::bazo_ptr o = obj + data->o;
return RAYS_NORM( o, P );
} );
CREATE_OBJECT_TYPE_DEFINITION(
sfero,
{
return length_3( p.x, p.y, p.z ) - data->r;
},
{
return p;
} );
CREATE_OBJECT_TYPE_DEFINITION(
kubo,
{
point q;
q.x = fabsf( p.x ) - data->b.x;
q.y = fabsf( p.y ) - data->b.y;
q.z = fabsf( p.z ) - data->b.z;
if ( q.x < 0.f && q.y < 0.f && q.z < 0.f )
return max( q.x, max( q.y, q.z ) );
else
return length_3( max( q.x, 0.f ), max( q.y, 0.f ), max( q.z, 0.f ) );
},
{
point q;
q.x = fabsf( p.x ) - data->b.x;
q.y = fabsf( p.y ) - data->b.y;
q.z = fabsf( p.z ) - data->b.z;
if ( q.x < 0.f && q.y < 0.f && q.z < 0.f )
return q.x > q.z ? ( q.x > q.y ? atomic::new_point( p.x > 0.f ? 1.f : -1.f, 0.f, 0.f ) : atomic::new_point(
0.f, p.y > 0.f ? 1.f : -1.f, 0.f ) ) : ( q.y > q.z ? atomic::new_point( 0.f, p.y > 0.f ? 1.f : -1.f,
0.f ) : atomic::new_point(
0.f, 0.f, p.z > 0.f ? 1.f : -1.f ) );
else
return atomic::new_point( q.x > 0.f ? p.x > 0.f ? 1.f : -1.f : 0.f, q.y > 0.f ? p.y > 0.f ? 1.f : -1.f : 0.f, q.z > 0.f ? p.z > 0.f ? 1.f : -1.f : 0.f );
} );
CREATE_OBJECT_TYPE_DEFINITION(
cilindro,
{
scalar r = length_2( p.x, p.y );
float2 q;
q.x = r - data->r;
q.y = fabsf( p.z ) - data->h;
if ( q.x < 0.f && q.y < 0.f )
return q.x > q.y ? q.x : q.y;
else
return length_2( max( q.x, 0.f ), max( q.y, 0.f ) );
},
{
scalar r = length_2( p.x, p.y );
float2 q;
q.x = r - data->r;
q.y = fabsf( p.z ) - data->h;
return q.x > q.y ? atomic::new_point( p.x, p.y, 0.f ) : atomic::new_point( 0.f, 0.f,
p.z > 0.f ? 1.f : -1.f );
} );
CREATE_OBJECT_TYPE_DEFINITION(
ebeno,
{
return atomic::dot( data->n, p );
},
{
return data->n;
} );
CREATE_OBJECT_TYPE_DEFINITION(
kunigajo_2,
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
return min( d0, d1 );
},
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
if ( d0 < d1 ) return RAYS_NORM( o0, p );
else return RAYS_NORM( o1, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
kunigajo_3,
{
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = min( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 2 ];
d = min( d, RAYS_DIST( o, p ) );
return d;
},
{
counter i_min = 0;
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d;
scalar d_min = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) {
d_min = d;
i_min = 1;
}
o = obj + data->o[ 2 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) { i_min = 2; }
o = obj + data->o[ i_min ];
return RAYS_NORM( o, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
kunigajo_4,
{
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = min( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 2 ];
d = min( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 3 ];
d = min( d, RAYS_DIST( o, p ) );
return d;
},
{
counter i_min = 0;
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d;
scalar d_min = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) {
d_min = d;
i_min = 1;
}
o = obj + data->o[ 2 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) {
d_min = d;
i_min = 2;
}
o = obj + data->o[ 3 ];
d = RAYS_DIST( o, p );
if ( d_min > d ) { i_min = 3; }
o = obj + data->o[ i_min ];
return RAYS_NORM( o, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
komunajo_2,
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
return max( d0, d1 );
},
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
if ( d0 > d1 ) return RAYS_NORM( o0, p );
else return RAYS_NORM( o1, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
komunajo_3,
{
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = max( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 2 ];
d = max( d, RAYS_DIST( o, p ) );
return d;
},
{
counter i_max = 0;
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d;
scalar d_max = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) {
d_max = d;
i_max = 1;
}
o = obj + data->o[ 2 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) { i_max = 2; }
o = obj + data->o[ i_max ];
return RAYS_NORM( o, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
komunajo_4,
{
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = max( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 2 ];
d = max( d, RAYS_DIST( o, p ) );
o = obj + data->o[ 3 ];
d = max( d, RAYS_DIST( o, p ) );
return d;
},
{
counter i_max = 0;
primitives::bazo_ptr o = obj + data->o[ 0 ];
scalar d;
scalar d_max = RAYS_DIST( o, p );
o = obj + data->o[ 1 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) {
d_max = d;
i_max = 1;
}
o = obj + data->o[ 2 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) {
d_max = d;
i_max = 2;
}
o = obj + data->o[ 3 ];
d = RAYS_DIST( o, p );
if ( d_max < d ) { i_max = 3; }
o = obj + data->o[ i_max ];
return RAYS_NORM( o, p );
} );
CREATE_OBJECT_TYPE_DEFINITION(
komplemento,
{
primitives::bazo_ptr O = obj + data->o;
scalar D = RAYS_DIST( O, p );
return -D;
},
{
primitives::bazo_ptr O = obj + data->o;
point N = RAYS_NORM( O, p );
return atomic::new_point( -N.x, -N.y, -N.z );
} );
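// glata_kunigajo_2 / glata_komunajo_2 below are the smooth union / smooth intersection:
// the two child distances are blended with the polynomial smooth-min
// mix(d0, d1, h) -/+ k*h*(1-h), where k sets the blend radius; the child normals are
// (re)normalised and lerped with the same weight h.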
CREATE_OBJECT_TYPE_DEFINITION(
glata_kunigajo_2,
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
scalar h = ( 1.f - ( d0 - d1 ) / data->k ) * .5f;
if ( h > 1.f ) return d0;
if ( h < 0.f ) return d1;
return atomic::mix( d0, d1, h ) - data->k * h * ( 1.f - h );
},
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
scalar h = ( 1.f - ( d0 - d1 ) / data->k ) * .5f;
if ( h > 1.f ) return RAYS_NORM( o0, p );
if ( h < 0.f ) return RAYS_NORM( o1, p );
point n0 = RAYS_NORM( o0, p );
point n1 = RAYS_NORM( o1, p );
d0 = r_length_3( n0.x, n0.y, n0.z );
d1 = r_length_3( n1.x, n1.y, n1.z );
return atomic::new_point( atomic::mix( d0 * n0.x, d1 * n1.x, h ), atomic::mix( d0 * n0.y, d1 * n1.y, h ),
atomic::mix( d0 * n0.z, d1 * n1.z, h ) );
} );
CREATE_OBJECT_TYPE_DEFINITION(
glata_komunajo_2,
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
scalar h = ( 1.f + ( d0 - d1 ) / data->k ) * .5f;
if ( h > 1.f ) return d0;
if ( h < 0.f ) return d1;
return atomic::mix( d0, d1, h ) + data->k * h * ( 1.f - h );
},
{
primitives::bazo_ptr o0 = obj + data->o[ 0 ];
primitives::bazo_ptr o1 = obj + data->o[ 1 ];
scalar d0 = RAYS_DIST( o0, p );
scalar d1 = RAYS_DIST( o1, p );
scalar h = ( 1.f + ( d0 - d1 ) / data->k ) * .5f;
if ( h > 1.f ) return RAYS_NORM( o0, p );
if ( h < 0.f ) return RAYS_NORM( o1, p );
point n0 = RAYS_NORM( o0, p );
point n1 = RAYS_NORM( o1, p );
d0 = r_length_3( n0.x, n0.y, n0.z );
d1 = r_length_3( n1.x, n1.y, n1.z );
return atomic::new_point( atomic::mix( d0 * n0.x, d1 * n1.x, h ), atomic::mix( d0 * n0.y, d1 * n1.y, h ),
atomic::mix( d0 * n0.z, d1 * n1.z, h ) );
} );
CREATE_OBJECT_TYPE_DEFINITION(
movo,
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.x = p.x - data->t.x;
P.y = p.y - data->t.y;
P.z = p.z - data->t.z;
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.x = p.x - data->t.x;
P.y = p.y - data->t.y;
P.z = p.z - data->t.z;
return RAYS_NORM( O, P );
} );
CREATE_OBJECT_TYPE_DEFINITION(
rotacioX,
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.y = data->cos_phi * p.y + data->sin_phi * p.z;
P.z = -data->sin_phi * p.y + data->cos_phi * p.z;
P.x = p.x;
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
point P;
point _P;
P.y = data->cos_phi * p.y + data->sin_phi * p.z;
P.z = -data->sin_phi * p.y + data->cos_phi * p.z;
P.x = p.x;
_P = RAYS_NORM( O, P );
P.y = data->cos_phi * _P.y - data->sin_phi * _P.z;
P.z = data->sin_phi * _P.y + data->cos_phi * _P.z;
P.x = _P.x;
return P;
} );
CREATE_OBJECT_TYPE_DEFINITION(
rotacioY,
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.z = data->cos_phi * p.z + data->sin_phi * p.x;
P.x = -data->sin_phi * p.z + data->cos_phi * p.x;
P.y = p.y;
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
point P;
point _P;
P.z = data->cos_phi * p.z + data->sin_phi * p.x;
P.x = -data->sin_phi * p.z + data->cos_phi * p.x;
P.y = p.y;
_P = RAYS_NORM( O, P );
P.z = data->cos_phi * _P.z - data->sin_phi * _P.x;
P.x = data->sin_phi * _P.z + data->cos_phi * _P.x;
P.y = _P.y;
return P;
} );
CREATE_OBJECT_TYPE_DEFINITION(
rotacioZ,
{
primitives::bazo_ptr O = obj + data->o;
point P;
P.x = data->cos_phi * p.x + data->sin_phi * p.y;
P.y = -data->sin_phi * p.x + data->cos_phi * p.y;
P.z = p.z;
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
point P;
point _P;
P.x = data->cos_phi * p.x + data->sin_phi * p.y;
P.y = -data->sin_phi * p.x + data->cos_phi * p.y;
P.z = p.z;
_P = RAYS_NORM( O, P );
P.x = data->cos_phi * _P.x - data->sin_phi * _P.y;
P.y = data->sin_phi * _P.x + data->cos_phi * _P.y;
P.z = _P.z;
return P;
} );
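// rotacioQ below rotates by the quaternion (q, q_w): it builds R = I + 2*(q*q^T - |q|^2*I + q_w*[q]x),
// applies R to the sample point before the child's distance lookup, and applies R^T to
// carry the child's normal back.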
CREATE_OBJECT_TYPE_DEFINITION(
rotacioQ,
{
primitives::bazo_ptr O = obj + data->o;
matrix Q;
scalar temp;
Q.x.x = data->q.x * data->q.x;
Q.y.y = data->q.y * data->q.y;
Q.z.z = data->q.z * data->q.z;
temp = Q.x.x + Q.y.y + Q.z.z;
Q.x.x -= temp;
Q.y.y -= temp;
Q.z.z -= temp;
Q.x.y = data->q.x * data->q.y;
temp = data->q.z * data->q_w;
Q.y.x = Q.x.y + temp;
Q.x.y -= temp;
Q.y.z = data->q.y * data->q.z;
temp = data->q.x * data->q_w;
Q.z.y = Q.y.z + temp;
Q.y.z -= temp;
Q.z.x = data->q.z * data->q.x;
temp = data->q.y * data->q_w;
Q.x.z = Q.z.x + temp;
Q.z.x -= temp;
point P = p;
P.x += 2.f * ( Q.x.x * p.x + Q.x.y * p.y + Q.x.z * p.z );
P.y += 2.f * ( Q.y.x * p.x + Q.y.y * p.y + Q.y.z * p.z );
P.z += 2.f * ( Q.z.x * p.x + Q.z.y * p.y + Q.z.z * p.z );
return RAYS_DIST( O, P );
},
{
primitives::bazo_ptr O = obj + data->o;
matrix Q;
scalar temp;
Q.x.x = data->q.x * data->q.x;
Q.y.y = data->q.y * data->q.y;
Q.z.z = data->q.z * data->q.z;
temp = Q.x.x + Q.y.y + Q.z.z;
Q.x.x -= temp;
Q.y.y -= temp;
Q.z.z -= temp;
Q.x.y = data->q.x * data->q.y;
temp = data->q.z * data->q_w;
Q.y.x = Q.x.y + temp;
Q.x.y -= temp;
Q.y.z = data->q.y * data->q.z;
temp = data->q.x * data->q_w;
Q.z.y = Q.y.z + temp;
Q.y.z -= temp;
Q.z.x = data->q.z * data->q.x;
temp = data->q.y * data->q_w;
Q.x.z = Q.z.x + temp;
Q.z.x -= temp;
point P = p;
P.x += 2.f * ( Q.x.x * p.x + Q.x.y * p.y + Q.x.z * p.z );
P.y += 2.f * ( Q.y.x * p.x + Q.y.y * p.y + Q.y.z * p.z );
P.z += 2.f * ( Q.z.x * p.x + Q.z.y * p.y + Q.z.z * p.z );
point N = RAYS_NORM( O, P );
P = N;
P.x += 2.f * ( Q.x.x * N.x + Q.y.x * N.y + Q.z.x * N.z );
P.y += 2.f * ( Q.x.y * N.x + Q.y.y * N.y + Q.z.y * N.z );
P.z += 2.f * ( Q.x.z * N.x + Q.y.z * N.y + Q.z.z * N.z );
return P;
} );
CREATE_OBJECT_TYPE_DEFINITION(
senfina_ripeto,
{
primitives::bazo_ptr o = obj + data->o;
point a = data->a;
scalar N = floorf( atomic::dot( a, p ) / atomic::dot( a, a ) + .5f );
a.x = p.x - N * a.x;
a.y = p.y - N * a.y;
a.z = p.z - N * a.z;
return RAYS_DIST( o, a );
},
{
primitives::bazo_ptr o = obj + data->o;
point a = data->a;
scalar N = floorf( atomic::dot( a, p ) / atomic::dot( a, a ) + .5f );
a.x = p.x - N * a.x;
a.y = p.y - N * a.y;
a.z = p.z - N * a.z;
return RAYS_NORM( o, a );
} );
};
// Kernels definitions
_KERNEL kernel_Process( const size_t *Width, const size_t *Height, const rays_info *Info_d, ray _PTR Rays,
primitives::bazo _PTR Primitives_d, const size_t *PrimitivesNum, cudaSurfaceObject_t Image ) {
coord
x = CUDA_RAYS_COORD_nD( x, 2 ),
y = CUDA_RAYS_COORD_nD( y, 2 ),
id = RAYS_PRIMITIVES_PER_THREAD * ( threadIdx.y * RAYS_BLOCK_2D_x + threadIdx.x ),
w = *Width, h = *Height;
// requires: RAYS_BLOCK_2D_x * RAYS_BLOCK_2D_y * RAYS_PRIMITIVES_PER_THREAD >= PrimitivesNum
__shared__ primitives::bazo curr_ptr[ RAYS_BLOCK_2D_x * RAYS_BLOCK_2D_y * RAYS_PRIMITIVES_PER_THREAD ];
if ( id < *PrimitivesNum ) {
primitives::bazo_ptr self = curr_ptr + id;
#pragma unroll
for ( size_t i = 0; i < RAYS_PRIMITIVES_PER_THREAD; ++i, ++self ) {
*self = Primitives_d[ id + i ];
//CREATE_OBJECT_TYPE_PROCESSING_LISTING_2( self );
}
}
__syncthreads();
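// at this point the whole primitive list has been staged cooperatively into shared memory
// (RAYS_PRIMITIVES_PER_THREAD entries per thread); each thread now marches its pixel's ray
// through the distance field of the root object (curr_ptr[0]) held in that shared copy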
if ( x < w && y < h ) {
scalar curr_dist, ray_dist = 0;
ray r = Rays[ y * w + x ];
uchar4 PIXEL = { 0x00, 0x00, 0x00, 0xff };
point curr_norm, light = Info_d->LightSource; //point{ 1.f, 0.f, 0.f };
#pragma unroll
for ( size_t I = 0; I < 500; ++I ) {
curr_dist = RAYS_DIST( curr_ptr, r.p );
if ( curr_dist < RAYS_MIN_DIST ) {
if ( curr_dist < -0.f ) {
curr_norm.x = -r.d.x;
curr_norm.y = -r.d.y;
curr_norm.z = -r.d.z;
} else {
curr_norm = RAYS_NORM( curr_ptr, r.p );
scalar R_1 = r_length_3( curr_norm.x, curr_norm.y, curr_norm.z );
curr_norm = atomic::mul_point( curr_norm, R_1 );
}
if ( atomic::dot( curr_norm, r.d ) < 0.f ) {
float N_L = atomic::dot( curr_norm, light );
float
SHADOW = 1.f,
OCCLUSION = 0.f,
SCA = 1.f;
point p = r.p;
ray_dist = RAYS_MIN_DIST;
#define DELTA 1
#define HARDNESS 128.f
p = r.p;
p.x += ( 1 + DELTA ) * RAYS_MIN_DIST * light.x;
p.y += ( 1 + DELTA ) * RAYS_MIN_DIST * light.y;
p.z += ( 1 + DELTA ) * RAYS_MIN_DIST * light.z;
ray_dist = ( 1 + DELTA ) * RAYS_MIN_DIST;
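// secondary march toward the light: the start point is pushed (1+DELTA)*RAYS_MIN_DIST along
// the light direction to step off the surface, and inside the loop
// SHADOW = min(SHADOW, HARDNESS*curr_dist/ray_dist) is the usual distance-field
// soft-shadow penumbra estimate (HARDNESS controls how sharp the shadow edge is)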
for ( size_t J = 0; J < 300; ++J ) {
curr_dist = RAYS_DIST( curr_ptr, p );
if ( 8 * curr_dist < RAYS_MIN_DIST ) {
if ( curr_dist < -0.f ) {
curr_norm.x = -light.x;
curr_norm.y = -light.y;
curr_norm.z = -light.z;
} else {
curr_norm = RAYS_NORM( curr_ptr, p );
scalar R_1 = r_length_3( curr_norm.x, curr_norm.y, curr_norm.z );
curr_norm = atomic::mul_point( curr_norm, R_1 );
}
if ( atomic::dot( curr_norm, r.d ) < RAYS_MIN_DIST ) {
SHADOW = 0.f;
break;
}
}
SHADOW = min( SHADOW, HARDNESS * curr_dist / ray_dist );
if ( SHADOW < 0.01f )
break;
p.x += curr_dist * light.x;
p.y += curr_dist * light.y;
p.z += curr_dist * light.z;
ray_dist += curr_dist;
// LIGHT
if ( ray_dist >= RAYS_MAX_DIST )
break;
}
float3 MATERIAL = { .5f, .7f, 1.f };
raw_byte LIGHT =
0xff * ( RAYS_MIN_LUM + ( RAYS_MAX_LUM - RAYS_MIN_LUM ) * ( N_L > 0.f ? N_L : 0.f ) * SHADOW );
PIXEL = {
raw_byte( LIGHT * MATERIAL.x ),
raw_byte( LIGHT * MATERIAL.y ),
raw_byte( LIGHT * MATERIAL.z ),
0xff
};
break;
}
}
r.p.x += curr_dist * r.d.x;
r.p.y += curr_dist * r.d.y;
r.p.z += curr_dist * r.d.z;
ray_dist += curr_dist;
if ( ray_dist >= RAYS_MAX_DIST ) {
break;
}
}
surf2Dwrite( PIXEL, Image, x * 4, y );
}
}
_KERNEL kernel_SetPrimitives( primitives::bazo _PTR Primitives, const size_t *PrimitivesNum ) {
coord x = CUDA_RAYS_COORD_nD( x, 1 );
if ( x < *PrimitivesNum ) {
primitives::bazo_ptr self = Primitives + x;
CREATE_OBJECT_TYPE_PROCESSING_LISTING_2( self )
}
}
_KERNEL kernel_SetRays( const size_t *Width, const size_t *Height, rays_info _PTR Info_d, ray _PTR Rays_d ) {
coord
x = CUDA_RAYS_COORD_nD( x, 2 ),
y = CUDA_RAYS_COORD_nD( y, 2 );
__shared__ rays_info Info[ 1 ];
if ( threadIdx.x == 0 && threadIdx.y == 0 )
Info[ 0 ] = *Info_d;
__syncthreads();
if ( x < *Width && y < *Height ) {
scalar
X = .5f * float( 2 * x - coord( *Width ) + 1 ),
Y = .5f * float( 2 * y - coord( *Height ) + 1 ),
Z = Info->Depth;
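// (X, Y) are the pixel's offsets from the screen centre in pixel units
// (0.5*(2*x - Width + 1) = x - (Width-1)/2); Z is the distance to the image plane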
point pos;
pos.x = X * Info->StartWVec.x + Y * Info->StartHVec.x;
pos.y = X * Info->StartWVec.y + Y * Info->StartHVec.y;
pos.z = X * Info->StartWVec.z + Y * Info->StartHVec.z;
point delta_pos;
delta_pos.x = Z * Info->StartDir.x;
delta_pos.y = Z * Info->StartDir.y;
delta_pos.z = Z * Info->StartDir.z;
scalar R_1 = rnorm3df( pos.x + delta_pos.x, pos.y + delta_pos.y, pos.z + delta_pos.z );
ray *self = Rays_d + y * *Width + x;
self->d = atomic::mul_point( atomic::add_point( pos, delta_pos ), R_1 );
self->p = atomic::add_point( pos, Info->StartPos );
}
}
|
f912d8985a476811e8f7dc25c39fd9733c554047.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaUtils.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-02
Developer: Hui Li ([email protected])
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#include "Panda.h"
#include "UserAPI.h"
#ifdef _WIN32
#include <windows.h>
#include <time.h>
#elif MACOS
#include <sys/param.h>
#include <sys/sysctl.h>
#elif __linux
#include <unistd.h>
#include <sys/time.h>
#endif
int getGPUCoresNum() {
//assert(tid<total);
int arch_cores_sm[3] = {1, 8, 32 };
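// arch_cores_sm[major] = CUDA cores per SM for compute capability major.x:
// 8 for 1.x, 32 for 2.x and newer (the leading 1 is just a placeholder)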
hipDeviceProp_t gpu_dev;
int tid = 0;
hipGetDeviceProperties(&gpu_dev, tid);
int sm_per_multiproc = 1;
if (gpu_dev.major == 9999 && gpu_dev.minor == 9999)
sm_per_multiproc = 1;
else if (gpu_dev.major <=2)
sm_per_multiproc = arch_cores_sm[gpu_dev.major];
else
sm_per_multiproc = arch_cores_sm[2];
return ((gpu_dev.multiProcessorCount)*(sm_per_multiproc));
//ShowLog("Configure Device ID:%d: Device Name:%s MultProcessorCount:%d sm_per_multiproc:%d", i, gpu_dev.name,gpu_dev.multiProcessorCount,sm_per_multiproc);
}
int getCPUCoresNum() {
#ifdef WIN32
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors;
#elif MACOS
int nm[2];
size_t len = 4;
uint32_t count;
nm[0] = CTL_HW; nm[1] = HW_AVAILCPU;
sysctl(nm, 2, &count, &len, NULL, 0);
if(count < 1) {
nm[1] = HW_NCPU;
sysctl(nm, 2, &count, &len, NULL, 0);
if(count < 1) { count = 1; }
}
return count;
#elif __linux
return sysconf(_SC_NPROCESSORS_ONLN);
#endif
}
#ifndef __PANDAUTILS_CU__
#define __PANDAUTILS_CU__
void DoDiskLog(const char *str){
FILE *fptr;
char file_name[128];
sprintf(file_name,"%s","panda.log");
fptr = fopen(file_name,"a");
fprintf(fptr,"[PandaDiskLog]\t\t:%s\n",str);
//fprintf(fptr,"%s",__VA_ARGS__);
fclose(fptr);
//printf("\n");
}//void
double PandaTimer(){
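	// Wall-clock time in seconds: gettimeofday on POSIX builds, GetTickCount (milliseconds
	// converted to seconds) on Windows.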
#ifndef _WIN32
static struct timeval tv;
gettimeofday(&tv,NULL);
double curTime = tv.tv_sec + tv.tv_usec/1000000.0;
//ShowLog("\t Panda CurTime:%f", curTime);
return curTime;
#else
//newtime = localtime( &long_time2 );
double curTime = GetTickCount();
//ShowLog("\t Panda CurTime:%f", curTime);
curTime /=1000.0;
return curTime;
#endif
}
void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err)
{
fprintf(stderr, "[PandaError][%s][%i]: CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit((int)err);
}
}
__global__ void printData(gpu_context d_g_state ){
//printf("-----------printData TID:%d\n",TID);
int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_idx = block_start_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE;
if(thread_end_idx>d_g_state.num_input_record)
thread_end_idx = d_g_state.num_input_record;
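	// The index math above hands each thread num_records_per_thread records, interleaved in
	// groups of STRIDE threads, so the loop below walks its records with a stride of STRIDE.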
int begin, end, val_pos, key_pos;
char *val_p,*key_p;
for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
begin=0;
end=0;
for (int i=0;i<map_task_idx;i++){
// begin += (d_g_state.d_intermediate_keyval_arr_arr[i].arr_len);
}//for
//end = begin + (d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr_len);
//printf("copyData:%d begin:%d, end:%d\n",TID,begin,end);
for(int i=begin;i<end;i++){
//keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]);
val_pos = d_g_state.d_intermediate_keyval_pos_arr[i].valPos;
key_pos = d_g_state.d_intermediate_keyval_pos_arr[i].keyPos;
val_p = (char*)(d_g_state.d_intermediate_vals_shared_buff)+val_pos;
key_p = (char*)(d_g_state.d_intermediate_keys_shared_buff)+key_pos;
//keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr[i-begin]);
//memcpy(key_p,p2->key,p2->keySize);
//memcpy(val_p,p2->val,p2->valSize);
printf("printData: TID:%d key: %s val:%d\n",TID,key_p,*(int *)val_p);
}//for
//if (index*recordsPerTask >= recordNum) return;
}//for
}//printData
#ifdef DEV_MODE
__global__ void printData2(gpu_context d_g_state ){
//printf("-----------printData TID:%d\n",TID);
//if(TID>=d_g_state.num_input_record)return;
//printf("printData2------------------------------%d\n",d_g_state.d_intermediate_keyval_arr_arr[TID].arr_len);
if (TID==0){
int keyPos = (d_g_state.d_input_keyval_pos_arr[0]).keyPos;
int valPos = (d_g_state.d_input_keyval_pos_arr[0]).valPos;
char *keyBuf = (char *)(d_g_state.d_input_keys_shared_buff)+keyPos;
MM_KEY_T *key = (MM_KEY_T*)keyBuf;
printf("Key2 =====================:%d\n",key->test);
for (int i=0;i<10;i++)
printf("%f ",key->matrix1[i]);
printf("\n");
for (int i=0;i<10;i++)
printf("%f ",key->matrix2[i]);
printf("\n");
for (int i=0;i<10;i++)
printf("%f ",key->matrix3[i]);
printf("\n");
}
//keyval_t * p1 = &(d_g_state.d_input_keyval_arr[TID]);
//int len = p1->valSize -1;
//((char *)(p1->val))[len] = '\0';
//printf("printData TID:%d keySize:%d key %d val:%s\n",TID,p1->keySize, *(int*)(p1->key), p1->val);
}//printData
#endif
__global__ void printData3(float *C ){
//if(TID==1){
printf("TID ==1 printC \n");
for (int i=0;i<10;i++){
printf("%f ",C[i]);
}
printf("\n");
//}
//printf("printData3 TID:%d key:%s",TID, p1->key);
//for (int i=0;i<p1->val_arr_len;i++)
// printf("printData3 :TID:%d, i:%d key:%s, val:%d\n",TID, i,p1->key, *(int*)p1->vals[i].val);
//printf("\n");
//printf("printData 3 TID:%d i:[%d] keySize:%d key %s val:%d\n",TID,i, p1->keySize, p1->key, *(int*)(p1->vals[i].val));
}//printData
//--------------------------------------------------------
//start a timer
//
//param : start_row_id_tv
//--------------------------------------------------------
/*
void start_row_idTimer(TimeVal_t *start_row_id_tv)
{
//gettimeofday((struct timeval*)start_row_id_tv, NULL);
}
*/
//--------------------------------------------------------
//end a timer, and print out a message
//
//param : msg message to print out
//param : start_row_id_tv
//--------------------------------------------------------
/*
void endTimer(char *msg, TimeVal_t *start_row_id_tv)
{
hipDeviceSynchronize();
struct timeval end_tv;
gettimeofday(&end_tv, NULL);
time_t sec = end_tv.tv_sec - start_row_id_tv->tv_sec;
time_t ms = end_tv.tv_usec - start_row_id_tv->tv_usec;
time_t diff = sec * 1000000 + ms;
//printf("%10s:\t\t%fms\n", msg, (double)((double)diff/1000.0));
}//void
*/
//----------------------------------------------------------
//print output records
//
//param: spec
//param: num -- maximum number of output records to print
//param: printFunc -- a function pointer
// void printFunc(void* key, void* val, int keySize, int valSize)
//----------------------------------------------------------
void PrintOutputRecords(Spec_t* spec, int num, PrintFunc_t printFunc)
{
/*
int maxNum = num;
if (maxNum > spec->outputRecordCount || maxNum < 0) maxNum = spec->outputRecordCount;
for (int i = 0; i < maxNum; ++i)
{
int4 index = spec->outputOffsetSizes[i];
printFunc((char*)spec->outputKeys + index.x, (char*)spec->outputVals + index.z, index.y, index.w);
}
*/
}//void
#endif //__PANDAUTILS_CU__ | f912d8985a476811e8f7dc25c39fd9733c554047.cu |
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaUtils.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-02
Developer: Hui Li ([email protected])
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#include "Panda.h"
#include "UserAPI.h"
#ifdef _WIN32
#include <windows.h>
#include <time.h>
#elif MACOS
#include <sys/param.h>
#include <sys/sysctl.h>
#elif __linux
#include <unistd.h>
#include <sys/time.h>
#endif
int getGPUCoresNum() {
//assert(tid<total);
int arch_cores_sm[3] = {1, 8, 32 };
cudaDeviceProp gpu_dev;
int tid = 0;
cudaGetDeviceProperties(&gpu_dev, tid);
int sm_per_multiproc = 1;
if (gpu_dev.major == 9999 && gpu_dev.minor == 9999)
sm_per_multiproc = 1;
else if (gpu_dev.major <=2)
sm_per_multiproc = arch_cores_sm[gpu_dev.major];
else
sm_per_multiproc = arch_cores_sm[2];
return ((gpu_dev.multiProcessorCount)*(sm_per_multiproc));
//ShowLog("Configure Device ID:%d: Device Name:%s MultProcessorCount:%d sm_per_multiproc:%d", i, gpu_dev.name,gpu_dev.multiProcessorCount,sm_per_multiproc);
}
int getCPUCoresNum() {
#ifdef WIN32
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors;
#elif MACOS
int nm[2];
size_t len = 4;
uint32_t count;
nm[0] = CTL_HW; nm[1] = HW_AVAILCPU;
sysctl(nm, 2, &count, &len, NULL, 0);
if(count < 1) {
nm[1] = HW_NCPU;
sysctl(nm, 2, &count, &len, NULL, 0);
if(count < 1) { count = 1; }
}
return count;
#elif __linux
return sysconf(_SC_NPROCESSORS_ONLN);
#endif
}
#ifndef __PANDAUTILS_CU__
#define __PANDAUTILS_CU__
void DoDiskLog(const char *str){
FILE *fptr;
char file_name[128];
sprintf(file_name,"%s","panda.log");
fptr = fopen(file_name,"a");
fprintf(fptr,"[PandaDiskLog]\t\t:%s\n",str);
//fprintf(fptr,"%s",__VA_ARGS__);
fclose(fptr);
//printf("\n");
}//void
double PandaTimer(){
#ifndef _WIN32
static struct timeval tv;
gettimeofday(&tv,NULL);
double curTime = tv.tv_sec + tv.tv_usec/1000000.0;
//ShowLog("\t Panda CurTime:%f", curTime);
return curTime;
#else
//newtime = localtime( &long_time2 );
double curTime = GetTickCount();
//ShowLog("\t Panda CurTime:%f", curTime);
curTime /=1000.0;
return curTime;
#endif
}
void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err)
{
fprintf(stderr, "[PandaError][%s][%i]: CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit((int)err);
}
}
__global__ void printData(gpu_context d_g_state ){
//printf("-----------printData TID:%d\n",TID);
int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_idx = block_start_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE;
if(thread_end_idx>d_g_state.num_input_record)
thread_end_idx = d_g_state.num_input_record;
int begin, end, val_pos, key_pos;
char *val_p,*key_p;
for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
begin=0;
end=0;
for (int i=0;i<map_task_idx;i++){
// begin += (d_g_state.d_intermediate_keyval_arr_arr[i].arr_len);
}//for
//end = begin + (d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr_len);
//printf("copyData:%d begin:%d, end:%d\n",TID,begin,end);
for(int i=begin;i<end;i++){
//keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]);
val_pos = d_g_state.d_intermediate_keyval_pos_arr[i].valPos;
key_pos = d_g_state.d_intermediate_keyval_pos_arr[i].keyPos;
val_p = (char*)(d_g_state.d_intermediate_vals_shared_buff)+val_pos;
key_p = (char*)(d_g_state.d_intermediate_keys_shared_buff)+key_pos;
//keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr[i-begin]);
//memcpy(key_p,p2->key,p2->keySize);
//memcpy(val_p,p2->val,p2->valSize);
printf("printData: TID:%d key: %s val:%d\n",TID,key_p,*(int *)val_p);
}//for
//if (index*recordsPerTask >= recordNum) return;
}//for
}//printData
#ifdef DEV_MODE
__global__ void printData2(gpu_context d_g_state ){
//printf("-----------printData TID:%d\n",TID);
//if(TID>=d_g_state.num_input_record)return;
//printf("printData2------------------------------%d\n",d_g_state.d_intermediate_keyval_arr_arr[TID].arr_len);
if (TID==0){
int keyPos = (d_g_state.d_input_keyval_pos_arr[0]).keyPos;
int valPos = (d_g_state.d_input_keyval_pos_arr[0]).valPos;
char *keyBuf = (char *)(d_g_state.d_input_keys_shared_buff)+keyPos;
MM_KEY_T *key = (MM_KEY_T*)keyBuf;
printf("Key2 =====================:%d\n",key->test);
for (int i=0;i<10;i++)
printf("%f ",key->matrix1[i]);
printf("\n");
for (int i=0;i<10;i++)
printf("%f ",key->matrix2[i]);
printf("\n");
for (int i=0;i<10;i++)
printf("%f ",key->matrix3[i]);
printf("\n");
}
//keyval_t * p1 = &(d_g_state.d_input_keyval_arr[TID]);
//int len = p1->valSize -1;
//((char *)(p1->val))[len] = '\0';
//printf("printData TID:%d keySize:%d key %d val:%s\n",TID,p1->keySize, *(int*)(p1->key), p1->val);
}//printData
#endif
__global__ void printData3(float *C ){
//if(TID==1){
printf("TID ==1 printC \n");
for (int i=0;i<10;i++){
printf("%f ",C[i]);
}
printf("\n");
//}
//printf("printData3 TID:%d key:%s",TID, p1->key);
//for (int i=0;i<p1->val_arr_len;i++)
// printf("printData3 :TID:%d, i:%d key:%s, val:%d\n",TID, i,p1->key, *(int*)p1->vals[i].val);
//printf("\n");
//printf("printData 3 TID:%d i:[%d] keySize:%d key %s val:%d\n",TID,i, p1->keySize, p1->key, *(int*)(p1->vals[i].val));
}//printData
//--------------------------------------------------------
//start a timer
//
//param : start_row_id_tv
//--------------------------------------------------------
/*
void start_row_idTimer(TimeVal_t *start_row_id_tv)
{
//gettimeofday((struct timeval*)start_row_id_tv, NULL);
}
*/
//--------------------------------------------------------
//end a timer, and print out a message
//
//param : msg message to print out
//param : start_row_id_tv
//--------------------------------------------------------
/*
void endTimer(char *msg, TimeVal_t *start_row_id_tv)
{
cudaThreadSynchronize();
struct timeval end_tv;
gettimeofday(&end_tv, NULL);
time_t sec = end_tv.tv_sec - start_row_id_tv->tv_sec;
time_t ms = end_tv.tv_usec - start_row_id_tv->tv_usec;
time_t diff = sec * 1000000 + ms;
//printf("%10s:\t\t%fms\n", msg, (double)((double)diff/1000.0));
}//void
*/
//----------------------------------------------------------
//print output records
//
//param: spec
//param: num -- maximum number of output records to print
//param: printFunc -- a function pointer
// void printFunc(void* key, void* val, int keySize, int valSize)
//----------------------------------------------------------
void PrintOutputRecords(Spec_t* spec, int num, PrintFunc_t printFunc)
{
/*
int maxNum = num;
if (maxNum > spec->outputRecordCount || maxNum < 0) maxNum = spec->outputRecordCount;
for (int i = 0; i < maxNum; ++i)
{
int4 index = spec->outputOffsetSizes[i];
printFunc((char*)spec->outputKeys + index.x, (char*)spec->outputVals + index.z, index.y, index.w);
}
*/
}//void
#endif //__PANDAUTILS_CU__ |
6c8de81ad530980c383885fa12c209377548124f.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudaFun.h"
#include <hip/hip_runtime.h>
#include <malloc.h>
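// Minimal host <-> device transfer helpers for float arrays:
//   allocateArray     - hipMalloc a device buffer and copy the host array into it
//   deallocateArray   - malloc a host buffer and copy the device array back to it
//   deleteDeviceArray - free the device buffer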
void allocateArray(float* h_arr, float* &d_arr, long totalsize){
size_t sizecpy = totalsize * sizeof(float);
hipMalloc((void **) &d_arr, sizecpy);
hipMemcpy(d_arr, h_arr, sizecpy, hipMemcpyHostToDevice);
}
void deallocateArray(float* &h_arr, float* d_arr, long totalsize){
size_t sizecpy = totalsize * sizeof(float);
h_arr = (float *)malloc(totalsize * sizeof(float));
hipMemcpy(h_arr, d_arr, sizecpy, hipMemcpyDeviceToHost);
}
void deleteDeviceArray(float *&d_arr, long totalsize){
hipFree(d_arr);
} | 6c8de81ad530980c383885fa12c209377548124f.cu | #include "cudaFun.h"
#include <cuda/cuda_runtime.h>
#include <malloc.h>
void allocateArray(float* h_arr, float* &d_arr, long totalsize){
size_t sizecpy = totalsize * sizeof(float);
cudaMalloc((void **) &d_arr, sizecpy);
cudaMemcpy(d_arr, h_arr, sizecpy, cudaMemcpyHostToDevice);
}
void deallocateArray(float* &h_arr, float* d_arr, long totalsize){
size_t sizecpy = totalsize * sizeof(float);
h_arr = (float *)malloc(totalsize * sizeof(float));
cudaMemcpy(h_arr, d_arr, sizecpy, cudaMemcpyDeviceToHost);
}
void deleteDeviceArray(float *&d_arr, long totalsize){
cudaFree(d_arr);
} |
ba892df23d978072705874168ffe163523c1c9f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
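	// Computes target = alpha * A * B + beta * target, where A is an m x k sparse matrix in
	// CSR form (data / indptr / indices) and B (dense_data) is a dense k x n matrix; both
	// dense_data and target are laid out column-major and each thread owns one output element.
	// A host-side launch would look roughly like this (sketch only -- the 16x16 block shape
	// and the d_* buffer names are assumptions, not part of this file):
	//   dim3 block(16, 16);
	//   dim3 grid((m + block.x - 1) / block.x, (n + block.y - 1) / block.y);
	//   hipLaunchKernelGGL(kSparseDot, grid, block, 0, 0,
	//                      m, n, k, d_data, d_indptr, d_indices, d_dense, d_target, beta, alpha);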
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
} | ba892df23d978072705874168ffe163523c1c9f1.cu | #include "includes.h"
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
} |
cb2df92fd96782c84003aa44388662e0d9bc223e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <hip/hip_runtime.h>
extern "C" __global__ void bench(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<n&&j<n){
int idx = i*n+j;
c[idx] = a[idx] + b[idx];
}
}
| cb2df92fd96782c84003aa44388662e0d9bc223e.cu | #include <stdint.h>
#include <cuda.h>
extern "C" __global__ void bench(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t n){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i<n&&j<n){
int idx = i*n+j;
c[idx] = a[idx] + b[idx];
}
}
|
2fa253187481c3befbfc762845e6ca40d043256d.hip | // !!! This is a file automatically generated by hipify!!!
#include "opencv2/highgui/highgui.hpp"
#include "opencv/cv.h"
int FastSobel(unsigned char *in, int width, int height, int widthStep,
unsigned char *edg, unsigned char *ang)
{
int i,j;
unsigned char *inPtr = NULL;
unsigned char *inPtr1 = NULL;
unsigned char *inPtr2 = NULL;
unsigned char *inPtr3 = NULL;
unsigned char *inPtr4 = NULL;
unsigned char *inPtr5 = NULL;
unsigned char *inPtr6 = NULL;
int *pEdgeX = (int *)calloc(width*height, sizeof(int));
int *pEdgeY = (int *)calloc(width*height, sizeof(int));
int *pEdgeXPtr = NULL;
int *pEdgeYPtr = NULL;
unsigned char *angPtr = NULL;
unsigned char *edgPtr = NULL;
// this is heuristic, and it should add receptive area
int widH = 1;
int widV = 1;
widV *= widthStep;
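	// The two loops below apply the separable 3x3 Sobel weights (1, 2, 1): pEdgeX gets the
	// horizontal gradient (right column minus left column), pEdgeY the vertical gradient
	// (rows below minus rows above); widH and widV are the pixel strides in each direction.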
for (i=1;i<height-1;i++)
{
pEdgeXPtr = pEdgeX + i*width + 1;
inPtr = in + i * widthStep + 1;
inPtr1 = inPtr + widH - widV;
inPtr2 = inPtr + widH;
inPtr3 = inPtr + widH + widV;
inPtr4 = inPtr - widH - widV;
inPtr5 = inPtr - widH;
inPtr6 = inPtr - widH + widV;
for (j=1;j<width-1;j++, pEdgeXPtr++)
{
*pEdgeXPtr
= (*inPtr1++ * 1 + (int)*inPtr2++ * 2 + *inPtr3++ * 1)
- (*inPtr4++ * 1 + (int)*inPtr5++ * 2 + *inPtr6++ * 1);
}
}
for (i=1;i<height-1;i++)
{
pEdgeYPtr = pEdgeY + i*width + 1;
inPtr = in + i * widthStep + 1;
inPtr1 = inPtr + widV - widH;
inPtr2 = inPtr + widV;
inPtr3 = inPtr + widV + widH;
inPtr4 = inPtr - widV - widH;
inPtr5 = inPtr - widV;
inPtr6 = inPtr - widV + widH;
for (j=1;j<width-1;j++, pEdgeYPtr++)
{
*pEdgeYPtr
= (*inPtr1++ * 1 + (int)*inPtr2++ * 2 + *inPtr3++ * 1)
- (*inPtr4++ * 1 + (int)*inPtr5++ * 2 + *inPtr6++ * 1);
}
}
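	// Combine the two gradient images: the angle is atan2(gy, gx) in degrees and the edge
	// strength is sqrt(gx^2 + gy^2) / 2, clamped to the 0..255 byte range.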
for (i=1;i<height-1;i++)
{
pEdgeXPtr = pEdgeX + i*width + 1;
pEdgeYPtr = pEdgeY + i*width + 1;
angPtr = ang + i * widthStep + 1;
edgPtr = edg + i * widthStep + 1;
for (j=1; j<width-1; j++, pEdgeYPtr++, pEdgeXPtr++, angPtr++, edgPtr++)
{
*angPtr = atan2((double)*pEdgeYPtr,(double)*pEdgeXPtr)*180/3.141592654;
            *edgPtr = std::min(255.0f,sqrt((float)*pEdgeXPtr**pEdgeXPtr + (float)*pEdgeYPtr**pEdgeYPtr)/2.0f);
}
}
free(pEdgeY); pEdgeY = NULL;
free(pEdgeX); pEdgeX = NULL;
return 0;
}
int main(int argc, char* argv[])
{
IplImage * img = cvLoadImage("1.jpg",1);
IplImage *grayImage = cvCreateImage(cvSize(img->width, img->height), 8, 1);
IplImage *gradientImage = cvCreateImage(cvSize(img->width, img->height), 8, 1);
IplImage *anglImage = cvCreateImage(cvSize(img->width, img->height), 8, 1);
cvCvtColor(img, grayImage, CV_BGR2GRAY); // color to gray
double t = cvGetTickCount();
FastSobel((unsigned char*)grayImage->imageData, grayImage->width, grayImage->height, grayImage->widthStep,
(unsigned char*)gradientImage->imageData, (unsigned char*)anglImage->imageData);
t = (cvGetTickCount()-t)/1000000;
printf("time: %4.4f\n", t);
cvNamedWindow("grayImage",1);
cvShowImage("grayImage",grayImage);
cvNamedWindow("gradientImage",1);
cvShowImage("gradientImage",gradientImage);
cvWaitKey(0);
cvReleaseImage(&img);
cvReleaseImage(&grayImage);
cvReleaseImage(&gradientImage);
cvReleaseImage(&anglImage);
return 0;
} | 2fa253187481c3befbfc762845e6ca40d043256d.cu | #include "opencv2/highgui/highgui.hpp"
#include "opencv/cv.h"
int FastSobel(unsigned char *in, int width, int height, int widthStep,
unsigned char *edg, unsigned char *ang)
{
int i,j;
unsigned char *inPtr = NULL;
unsigned char *inPtr1 = NULL;
unsigned char *inPtr2 = NULL;
unsigned char *inPtr3 = NULL;
unsigned char *inPtr4 = NULL;
unsigned char *inPtr5 = NULL;
unsigned char *inPtr6 = NULL;
int *pEdgeX = (int *)calloc(width*height, sizeof(int));
int *pEdgeY = (int *)calloc(width*height, sizeof(int));
int *pEdgeXPtr = NULL;
int *pEdgeYPtr = NULL;
unsigned char *angPtr = NULL;
unsigned char *edgPtr = NULL;
// this is heuristic, and it should add receptive area
int widH = 1;
int widV = 1;
widV *= widthStep;
for (i=1;i<height-1;i++)
{
pEdgeXPtr = pEdgeX + i*width + 1;
inPtr = in + i * widthStep + 1;
inPtr1 = inPtr + widH - widV;
inPtr2 = inPtr + widH;
inPtr3 = inPtr + widH + widV;
inPtr4 = inPtr - widH - widV;
inPtr5 = inPtr - widH;
inPtr6 = inPtr - widH + widV;
for (j=1;j<width-1;j++, pEdgeXPtr++)
{
*pEdgeXPtr
= (*inPtr1++ * 1 + (int)*inPtr2++ * 2 + *inPtr3++ * 1)
- (*inPtr4++ * 1 + (int)*inPtr5++ * 2 + *inPtr6++ * 1);
}
}
for (i=1;i<height-1;i++)
{
pEdgeYPtr = pEdgeY + i*width + 1;
inPtr = in + i * widthStep + 1;
inPtr1 = inPtr + widV - widH;
inPtr2 = inPtr + widV;
inPtr3 = inPtr + widV + widH;
inPtr4 = inPtr - widV - widH;
inPtr5 = inPtr - widV;
inPtr6 = inPtr - widV + widH;
for (j=1;j<width-1;j++, pEdgeYPtr++)
{
*pEdgeYPtr
= (*inPtr1++ * 1 + (int)*inPtr2++ * 2 + *inPtr3++ * 1)
- (*inPtr4++ * 1 + (int)*inPtr5++ * 2 + *inPtr6++ * 1);
}
}
for (i=1;i<height-1;i++)
{
pEdgeXPtr = pEdgeX + i*width + 1;
pEdgeYPtr = pEdgeY + i*width + 1;
angPtr = ang + i * widthStep + 1;
edgPtr = edg + i * widthStep + 1;
for (j=1; j<width-1; j++, pEdgeYPtr++, pEdgeXPtr++, angPtr++, edgPtr++)
{
*angPtr = atan2((double)*pEdgeYPtr,(double)*pEdgeXPtr)*180/3.141592654;
*edgPtr = std::min(255.0f,sqrt((float)*pEdgeXPtr**pEdgeXPtr + (float)*pEdgeYPtr**pEdgeYPtr)/2.0f);
}
}
free(pEdgeY); pEdgeY = NULL;
free(pEdgeX); pEdgeX = NULL;
return 0;
}
int main(int argc, char* argv[])
{
IplImage * img = cvLoadImage("1.jpg",1);
IplImage *grayImage = cvCreateImage(cvSize(img->width, img->height), 8, 1);
IplImage *gradientImage = cvCreateImage(cvSize(img->width, img->height), 8, 1);
IplImage *anglImage = cvCreateImage(cvSize(img->width, img->height), 8, 1);
cvCvtColor(img, grayImage, CV_BGR2GRAY); // color to gray
double t = cvGetTickCount();
FastSobel((unsigned char*)grayImage->imageData, grayImage->width, grayImage->height, grayImage->widthStep,
(unsigned char*)gradientImage->imageData, (unsigned char*)anglImage->imageData);
t = (cvGetTickCount()-t)/1000000;
printf("time: %4.4f\n", t);
cvNamedWindow("grayImage",1);
cvShowImage("grayImage",grayImage);
cvNamedWindow("gradientImage",1);
cvShowImage("gradientImage",gradientImage);
cvWaitKey(0);
cvReleaseImage(&img);
cvReleaseImage(&grayImage);
cvReleaseImage(&gradientImage);
cvReleaseImage(&anglImage);
return 0;
} |
ab746305835c1802668bab0f0d5b4df99bbd3875.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FixLJCut.h"
#include "BoundsGPU.h"
#include "GridGPU.h"
#include "list_macro.h"
#include "PairEvaluateIso.h"
#include "State.h"
#include "cutils_func.h"
const std::string LJCutType = "LJCut";
FixLJCut::FixLJCut(boost::shared_ptr<State> state_, std::string handle_)
: FixPair(state_, handle_, "all", LJCutType, true, 1),
epsHandle("eps"), sigHandle("sig"), rCutHandle("rCut")
{
initializeParameters(epsHandle, epsilons);
initializeParameters(sigHandle, sigmas);
initializeParameters(rCutHandle, rCuts);
paramOrder = {epsHandle, sigHandle, rCutHandle};
}
void FixLJCut::compute(bool computeVirials) {
int nAtoms = state->atoms.size();
int numTypes = state->atomParams.numTypes;
GPUData &gpd = state->gpd;
GridGPU &grid = state->gridGPU;
int activeIdx = gpd.activeIdx();
uint16_t *neighborCounts = grid.perAtomArray.d_data.data();
float *neighborCoefs = state->specialNeighborCoefs;
hipLaunchKernelGGL(( compute_force_iso<EvaluatorLJ, 3>) , dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), 3*numTypes*numTypes*sizeof(float), 0,
nAtoms, gpd.xs(activeIdx), gpd.fs(activeIdx),
neighborCounts, grid.neighborlist.data(), grid.perBlockArray.d_data.data(),
state->devManager.prop.warpSize, paramsCoalesced.data(), numTypes, state->boundsGPU,
neighborCoefs[0], neighborCoefs[1], neighborCoefs[2], evaluator);
}
void FixLJCut::singlePointEng(float *perParticleEng) {
int nAtoms = state->atoms.size();
int numTypes = state->atomParams.numTypes;
GPUData &gpd = state->gpd;
GridGPU &grid = state->gridGPU;
int activeIdx = gpd.activeIdx();
uint16_t *neighborCounts = grid.perAtomArray.d_data.data();
float *neighborCoefs = state->specialNeighborCoefs;
    hipLaunchKernelGGL(( compute_energy_iso<EvaluatorLJ, 3>), dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), 3*numTypes*numTypes*sizeof(float), 0,
            nAtoms, gpd.xs(activeIdx), perParticleEng, neighborCounts,
            grid.neighborlist.data(), grid.perBlockArray.d_data.data(),
            state->devManager.prop.warpSize, paramsCoalesced.data(), numTypes, state->boundsGPU,
            neighborCoefs[0], neighborCoefs[1], neighborCoefs[2], evaluator);
}
bool FixLJCut::prepareForRun() {
//loop through all params and fill with appropriate lambda function, then send all to device
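    // The fill lambdas implement Lorentz-Berthelot-style mixing (geometric mean for epsilon,
    // arithmetic mean for sigma, max of the per-type cutoffs with the global rCut on the
    // diagonal), and the process lambdas precompute 24*eps, sigma^6 and rCut^2 so the pair
    // kernels avoid that work per interaction.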
auto fillEps = [] (float a, float b) {
return sqrt(a*b);
};
auto fillSig = [] (float a, float b) {
return (a+b) / 2.0;
};
auto fillRCut = [this] (float a, float b) {
return (float) std::fmax(a, b);
};
auto none = [] (float a){};
auto fillRCutDiag = [this] () {
return (float) state->rCut;
};
auto processEps = [] (float a) {
return 24*a;
};
auto processSig = [] (float a) {
return pow(a, 6);
};
auto processRCut = [] (float a) {
return a*a;
};
prepareParameters(epsHandle, fillEps, processEps, false);
prepareParameters(sigHandle, fillSig, processSig, false);
prepareParameters(rCutHandle, fillRCut, processRCut, true, fillRCutDiag);
sendAllToDevice();
return true;
}
std::string FixLJCut::restartChunk(std::string format) {
std::stringstream ss;
ss << restartChunkPairParams(format);
return ss.str();
}
bool FixLJCut::readFromRestart(pugi::xml_node restData) {
std::cout << "Reading form restart" << std::endl;
auto curr_param = restData.first_child();
while (curr_param) {
if (curr_param.name() == "parameter") {
std::vector<float> val;
std::string paramHandle = curr_param.attribute("handle").value();
std::string s;
std::istringstream ss(curr_param.value());
while (ss >> s) {
val.push_back(atof(s.c_str()));
}
initializeParameters(paramHandle, val);
}
curr_param = curr_param.next_sibling();
}
std::cout << "Reading LJ parameters from restart" << std::endl;
return true;
}
bool FixLJCut::postRun() {
resetToPreproc(sigHandle);
resetToPreproc(epsHandle);
resetToPreproc(rCutHandle);
return true;
}
void FixLJCut::addSpecies(std::string handle) {
initializeParameters(epsHandle, epsilons);
initializeParameters(sigHandle, sigmas);
initializeParameters(rCutHandle, rCuts);
}
std::vector<float> FixLJCut::getRCuts() { // to be called after prepare. These are squares now
return LISTMAP(float, float, rc, rCuts, sqrt(rc));
}
void export_FixLJCut() {
boost::python::class_<FixLJCut,
boost::shared_ptr<FixLJCut>,
boost::python::bases<FixPair>, boost::noncopyable > (
"FixLJCut",
boost::python::init<boost::shared_ptr<State>, std::string> (
boost::python::args("state", "handle"))
);
}
| ab746305835c1802668bab0f0d5b4df99bbd3875.cu | #include "FixLJCut.h"
#include "BoundsGPU.h"
#include "GridGPU.h"
#include "list_macro.h"
#include "PairEvaluateIso.h"
#include "State.h"
#include "cutils_func.h"
const std::string LJCutType = "LJCut";
FixLJCut::FixLJCut(boost::shared_ptr<State> state_, std::string handle_)
: FixPair(state_, handle_, "all", LJCutType, true, 1),
epsHandle("eps"), sigHandle("sig"), rCutHandle("rCut")
{
initializeParameters(epsHandle, epsilons);
initializeParameters(sigHandle, sigmas);
initializeParameters(rCutHandle, rCuts);
paramOrder = {epsHandle, sigHandle, rCutHandle};
}
void FixLJCut::compute(bool computeVirials) {
int nAtoms = state->atoms.size();
int numTypes = state->atomParams.numTypes;
GPUData &gpd = state->gpd;
GridGPU &grid = state->gridGPU;
int activeIdx = gpd.activeIdx();
uint16_t *neighborCounts = grid.perAtomArray.d_data.data();
float *neighborCoefs = state->specialNeighborCoefs;
compute_force_iso<EvaluatorLJ, 3> <<<NBLOCK(nAtoms), PERBLOCK, 3*numTypes*numTypes*sizeof(float)>>>(
nAtoms, gpd.xs(activeIdx), gpd.fs(activeIdx),
neighborCounts, grid.neighborlist.data(), grid.perBlockArray.d_data.data(),
state->devManager.prop.warpSize, paramsCoalesced.data(), numTypes, state->boundsGPU,
neighborCoefs[0], neighborCoefs[1], neighborCoefs[2], evaluator);
}
void FixLJCut::singlePointEng(float *perParticleEng) {
int nAtoms = state->atoms.size();
int numTypes = state->atomParams.numTypes;
GPUData &gpd = state->gpd;
GridGPU &grid = state->gridGPU;
int activeIdx = gpd.activeIdx();
uint16_t *neighborCounts = grid.perAtomArray.d_data.data();
float *neighborCoefs = state->specialNeighborCoefs;
    compute_energy_iso<EvaluatorLJ, 3><<<NBLOCK(nAtoms), PERBLOCK, 3*numTypes*numTypes*sizeof(float)>>>(
            nAtoms, gpd.xs(activeIdx), perParticleEng, neighborCounts,
            grid.neighborlist.data(), grid.perBlockArray.d_data.data(),
            state->devManager.prop.warpSize, paramsCoalesced.data(), numTypes, state->boundsGPU,
            neighborCoefs[0], neighborCoefs[1], neighborCoefs[2], evaluator);
}
bool FixLJCut::prepareForRun() {
//loop through all params and fill with appropriate lambda function, then send all to device
auto fillEps = [] (float a, float b) {
return sqrt(a*b);
};
auto fillSig = [] (float a, float b) {
return (a+b) / 2.0;
};
auto fillRCut = [this] (float a, float b) {
return (float) std::fmax(a, b);
};
auto none = [] (float a){};
auto fillRCutDiag = [this] () {
return (float) state->rCut;
};
auto processEps = [] (float a) {
return 24*a;
};
auto processSig = [] (float a) {
return pow(a, 6);
};
auto processRCut = [] (float a) {
return a*a;
};
prepareParameters(epsHandle, fillEps, processEps, false);
prepareParameters(sigHandle, fillSig, processSig, false);
prepareParameters(rCutHandle, fillRCut, processRCut, true, fillRCutDiag);
sendAllToDevice();
return true;
}
std::string FixLJCut::restartChunk(std::string format) {
std::stringstream ss;
ss << restartChunkPairParams(format);
return ss.str();
}
bool FixLJCut::readFromRestart(pugi::xml_node restData) {
std::cout << "Reading form restart" << std::endl;
auto curr_param = restData.first_child();
while (curr_param) {
if (curr_param.name() == "parameter") {
std::vector<float> val;
std::string paramHandle = curr_param.attribute("handle").value();
std::string s;
std::istringstream ss(curr_param.value());
while (ss >> s) {
val.push_back(atof(s.c_str()));
}
initializeParameters(paramHandle, val);
}
curr_param = curr_param.next_sibling();
}
std::cout << "Reading LJ parameters from restart" << std::endl;
return true;
}
bool FixLJCut::postRun() {
resetToPreproc(sigHandle);
resetToPreproc(epsHandle);
resetToPreproc(rCutHandle);
return true;
}
void FixLJCut::addSpecies(std::string handle) {
initializeParameters(epsHandle, epsilons);
initializeParameters(sigHandle, sigmas);
initializeParameters(rCutHandle, rCuts);
}
std::vector<float> FixLJCut::getRCuts() { // to be called after prepare. These are squares now
return LISTMAP(float, float, rc, rCuts, sqrt(rc));
}
void export_FixLJCut() {
boost::python::class_<FixLJCut,
boost::shared_ptr<FixLJCut>,
boost::python::bases<FixPair>, boost::noncopyable > (
"FixLJCut",
boost::python::init<boost::shared_ptr<State>, std::string> (
boost::python::args("state", "handle"))
);
}
|
253b9b48ef22b544afd51fd2a97e6ba2f3be131b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include "mish.h"
namespace nvinfer1
{
// create the plugin at runtime from a byte stream
MishPlugin::MishPlugin(const void* data, size_t length)
{
assert(length == sizeof(input_size_));
input_size_ = *reinterpret_cast<const int*>(data);
}
void MishPlugin::serialize(void* buffer) const noexcept
{
*reinterpret_cast<int*>(buffer) = input_size_;
}
size_t MishPlugin::getSerializationSize() const noexcept
{
return sizeof(input_size_);
}
int MishPlugin::initialize() noexcept
{
return 0;
}
bool MishPlugin::supportsFormat(DataType type, PluginFormat format) const noexcept
{
return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR);
}
void MishPlugin::configureWithFormat(const Dims* inputDims, int nbInputs,
const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept
{
}
Dims MishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)noexcept
{
assert(nbInputDims == 1);
assert(index == 0);
input_size_ = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2];
// Output dimensions
return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]);
}
// Set plugin namespace
void MishPlugin::setPluginNamespace(const char* pluginNamespace)noexcept
{
mPluginNamespace = pluginNamespace;
}
const char* MishPlugin::getPluginNamespace() const noexcept
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType MishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool MishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const noexcept
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool MishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept
{
return false;
}
void MishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)noexcept
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void MishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)noexcept
{
}
// Detach the plugin object from its execution context.
void MishPlugin::detachFromContext()noexcept {}
const char* MishPlugin::getPluginType() const noexcept
{
return "Mish_TRT";
}
const char* MishPlugin::getPluginVersion() const noexcept
{
return "1";
}
void MishPlugin::destroy()noexcept
{
delete this;
}
// Clone the plugin
IPluginV2* MishPlugin::clone() const noexcept
{
MishPlugin *p = new MishPlugin();
p->input_size_ = input_size_;
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);}
__device__ float softplus_kernel(float x, float threshold = 20)
{
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return logf(expf(x) + 1);
}
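// Mish activation evaluated element-wise, one thread per element:
// mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)).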
__global__ void mish_kernel(const float *input, float *output, int num_elem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= num_elem) return;
//float t = exp(input[idx]);
//if (input[idx] > 20.0) {
// t *= t;
// output[idx] = (t - 1.0) / (t + 1.0);
//} else {
// float tt = t * t;
// output[idx] = (tt + 2.0 * t) / (tt + 2.0 * t + 2.0);
//}
//output[idx] *= input[idx];
output[idx] = input[idx] * tanh_activate_kernel(softplus_kernel(input[idx]));
}
void MishPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize)
{
int block_size = thread_count_;
int grid_size = (input_size_ * batchSize + block_size - 1) / block_size;
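    // Ceil-divide so the grid covers all input_size_ * batchSize elements; the kernel
    // bounds-checks idx, so the final partially filled block is safe.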
hipLaunchKernelGGL(( mish_kernel), dim3(grid_size), dim3(block_size), 0, stream, inputs[0], output, input_size_ * batchSize);
}
int MishPlugin::enqueue(int batchSize,
const void* const* inputs,
void* const* outputs,
void* workspace,
hipStream_t stream) noexcept
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
int MishPlugin::enqueue(int batchSize,
const void* const* inputs,
void** outputs,
void* workspace,
hipStream_t stream) noexcept
{
return enqueue(batchSize, inputs, (void* const*)outputs, workspace, stream);
}
PluginFieldCollection MishPluginCreator::mFC{};
std::vector<PluginField> MishPluginCreator::mPluginAttributes;
MishPluginCreator::MishPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* MishPluginCreator::getPluginName() const noexcept
{
return "Mish_TRT";
}
const char* MishPluginCreator::getPluginVersion() const noexcept
{
return "1";
}
const PluginFieldCollection* MishPluginCreator::getFieldNames()noexcept
{
return &mFC;
}
IPluginV2* MishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)noexcept
{
MishPlugin* obj = new MishPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2* MishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)noexcept
{
// This object will be deleted when the network is destroyed, which will
// call MishPlugin::destroy()
MishPlugin* obj = new MishPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
void MishPluginCreator::setPluginNamespace(const char* libNamespace)noexcept
{
mNamespace = libNamespace;
}
const char* MishPluginCreator::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
}
| 253b9b48ef22b544afd51fd2a97e6ba2f3be131b.cu | #include <cmath>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include "mish.h"
namespace nvinfer1
{
// create the plugin at runtime from a byte stream
MishPlugin::MishPlugin(const void* data, size_t length)
{
assert(length == sizeof(input_size_));
input_size_ = *reinterpret_cast<const int*>(data);
}
void MishPlugin::serialize(void* buffer) const noexcept
{
*reinterpret_cast<int*>(buffer) = input_size_;
}
size_t MishPlugin::getSerializationSize() const noexcept
{
return sizeof(input_size_);
}
int MishPlugin::initialize() noexcept
{
return 0;
}
bool MishPlugin::supportsFormat(DataType type, PluginFormat format) const noexcept
{
return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR);
}
void MishPlugin::configureWithFormat(const Dims* inputDims, int nbInputs,
const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept
{
}
Dims MishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)noexcept
{
assert(nbInputDims == 1);
assert(index == 0);
input_size_ = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2];
// Output dimensions
return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]);
}
// Set plugin namespace
void MishPlugin::setPluginNamespace(const char* pluginNamespace)noexcept
{
mPluginNamespace = pluginNamespace;
}
const char* MishPlugin::getPluginNamespace() const noexcept
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType MishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool MishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const noexcept
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool MishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept
{
return false;
}
void MishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)noexcept
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void MishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)noexcept
{
}
// Detach the plugin object from its execution context.
void MishPlugin::detachFromContext()noexcept {}
const char* MishPlugin::getPluginType() const noexcept
{
return "Mish_TRT";
}
const char* MishPlugin::getPluginVersion() const noexcept
{
return "1";
}
void MishPlugin::destroy()noexcept
{
delete this;
}
// Clone the plugin
IPluginV2* MishPlugin::clone() const noexcept
{
MishPlugin *p = new MishPlugin();
p->input_size_ = input_size_;
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);}
__device__ float softplus_kernel(float x, float threshold = 20)
{
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return logf(expf(x) + 1);
}
__global__ void mish_kernel(const float *input, float *output, int num_elem)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= num_elem) return;
//float t = exp(input[idx]);
//if (input[idx] > 20.0) {
// t *= t;
// output[idx] = (t - 1.0) / (t + 1.0);
//} else {
// float tt = t * t;
// output[idx] = (tt + 2.0 * t) / (tt + 2.0 * t + 2.0);
//}
//output[idx] *= input[idx];
output[idx] = input[idx] * tanh_activate_kernel(softplus_kernel(input[idx]));
}
void MishPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize)
{
int block_size = thread_count_;
int grid_size = (input_size_ * batchSize + block_size - 1) / block_size;
mish_kernel<<<grid_size, block_size, 0, stream>>>(inputs[0], output, input_size_ * batchSize);
}
int MishPlugin::enqueue(int batchSize,
const void* const* inputs,
void* const* outputs,
void* workspace,
cudaStream_t stream) noexcept
{
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
int MishPlugin::enqueue(int batchSize,
const void* const* inputs,
void** outputs,
void* workspace,
cudaStream_t stream) noexcept
{
return enqueue(batchSize, inputs, (void* const*)outputs, workspace, stream);
}
PluginFieldCollection MishPluginCreator::mFC{};
std::vector<PluginField> MishPluginCreator::mPluginAttributes;
MishPluginCreator::MishPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* MishPluginCreator::getPluginName() const noexcept
{
return "Mish_TRT";
}
const char* MishPluginCreator::getPluginVersion() const noexcept
{
return "1";
}
const PluginFieldCollection* MishPluginCreator::getFieldNames()noexcept
{
return &mFC;
}
IPluginV2* MishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)noexcept
{
MishPlugin* obj = new MishPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2* MishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)noexcept
{
// This object will be deleted when the network is destroyed, which will
// call MishPlugin::destroy()
MishPlugin* obj = new MishPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
void MishPluginCreator::setPluginNamespace(const char* libNamespace)noexcept
{
mNamespace = libNamespace;
}
const char* MishPluginCreator::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
}
|
87c45ee9e337f4bda5b90d0da7b126db3287e007.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if CUDART_VERSION >= 10010
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/eig.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
template <typename T>
struct EigSelInputs {
T tolerance;
int len;
int n;
int n_eigen_vals;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const EigSelInputs<T>& dims)
{
return os;
}
template <typename T>
class EigSelTest : public ::testing::TestWithParam<EigSelInputs<T>> {
public:
EigSelTest()
: params(::testing::TestWithParam<EigSelInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
cov_matrix(params.len, stream),
eig_vectors(params.n_eigen_vals * params.n, stream),
eig_vectors_ref(params.n_eigen_vals * params.n, stream),
eig_vals(params.n, stream),
eig_vals_ref(params.n, stream)
{
}
protected:
void SetUp() override
{
int len = params.len;
///@todo: Generate a random symmetric matrix
T cov_matrix_h[] = {
1.0, 0.9, 0.81, 0.729, 0.9, 1.0, 0.9, 0.81, 0.81, 0.9, 1.0, 0.9, 0.729, 0.81, 0.9, 1.0};
ASSERT(len == 16, "This test only works with 4x4 matrices!");
raft::update_device(cov_matrix.data(), cov_matrix_h, len, stream);
T eig_vectors_ref_h[] = {-0.5123,
0.4874,
0.4874,
-0.5123,
0.6498,
0.2789,
-0.2789,
-0.6498,
0.4874,
0.5123,
0.5123,
0.4874};
T eig_vals_ref_h[] = {0.1024, 0.3096, 3.5266, 0.0};
raft::update_device(
eig_vectors_ref.data(), eig_vectors_ref_h, params.n_eigen_vals * params.n, stream);
raft::update_device(eig_vals_ref.data(), eig_vals_ref_h, params.n_eigen_vals, stream);
auto cov_matrix_view = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>(
cov_matrix.data(), params.n, params.n);
auto eig_vectors_view = raft::make_device_matrix_view<T, std::uint32_t, raft::col_major>(
eig_vectors.data(), params.n_eigen_vals, params.n);
auto eig_vals_view =
raft::make_device_vector_view<T, std::uint32_t>(eig_vals.data(), params.n_eigen_vals);
raft::linalg::eig_dc_selective(handle,
cov_matrix_view,
eig_vectors_view,
eig_vals_view,
static_cast<std::size_t>(params.n_eigen_vals),
EigVecMemUsage::OVERWRITE_INPUT);
resource::sync_stream(handle);
}
protected:
raft::resources handle;
hipStream_t stream;
EigSelInputs<T> params;
rmm::device_uvector<T> cov_matrix;
rmm::device_uvector<T> eig_vectors;
rmm::device_uvector<T> eig_vectors_ref;
rmm::device_uvector<T> eig_vals;
rmm::device_uvector<T> eig_vals_ref;
};
const std::vector<EigSelInputs<float>> inputsf2 = {{0.001f, 4 * 4, 4, 3}};
const std::vector<EigSelInputs<double>> inputsd2 = {{0.001, 4 * 4, 4, 3}};
typedef EigSelTest<float> EigSelTestValF;
TEST_P(EigSelTestValF, Result)
{
ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
eig_vals.data(),
params.n_eigen_vals,
raft::CompareApproxAbs<float>(params.tolerance),
stream));
}
typedef EigSelTest<double> EigSelTestValD;
TEST_P(EigSelTestValD, Result)
{
ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
eig_vals.data(),
params.n_eigen_vals,
raft::CompareApproxAbs<double>(params.tolerance),
stream));
}
typedef EigSelTest<float> EigSelTestVecF;
TEST_P(EigSelTestVecF, Result)
{
ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
eig_vectors.data(),
params.n_eigen_vals * params.n,
raft::CompareApproxAbs<float>(params.tolerance),
stream));
}
typedef EigSelTest<double> EigSelTestVecD;
TEST_P(EigSelTestVecD, Result)
{
ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
eig_vectors.data(),
params.n_eigen_vals * params.n,
raft::CompareApproxAbs<double>(params.tolerance),
stream));
}
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestVecD, ::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
#endif
| 87c45ee9e337f4bda5b90d0da7b126db3287e007.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if CUDART_VERSION >= 10010
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/eig.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
template <typename T>
struct EigSelInputs {
T tolerance;
int len;
int n;
int n_eigen_vals;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const EigSelInputs<T>& dims)
{
return os;
}
template <typename T>
class EigSelTest : public ::testing::TestWithParam<EigSelInputs<T>> {
public:
EigSelTest()
: params(::testing::TestWithParam<EigSelInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
cov_matrix(params.len, stream),
eig_vectors(params.n_eigen_vals * params.n, stream),
eig_vectors_ref(params.n_eigen_vals * params.n, stream),
eig_vals(params.n, stream),
eig_vals_ref(params.n, stream)
{
}
protected:
void SetUp() override
{
int len = params.len;
///@todo: Generate a random symmetric matrix
T cov_matrix_h[] = {
1.0, 0.9, 0.81, 0.729, 0.9, 1.0, 0.9, 0.81, 0.81, 0.9, 1.0, 0.9, 0.729, 0.81, 0.9, 1.0};
ASSERT(len == 16, "This test only works with 4x4 matrices!");
raft::update_device(cov_matrix.data(), cov_matrix_h, len, stream);
T eig_vectors_ref_h[] = {-0.5123,
0.4874,
0.4874,
-0.5123,
0.6498,
0.2789,
-0.2789,
-0.6498,
0.4874,
0.5123,
0.5123,
0.4874};
T eig_vals_ref_h[] = {0.1024, 0.3096, 3.5266, 0.0};
raft::update_device(
eig_vectors_ref.data(), eig_vectors_ref_h, params.n_eigen_vals * params.n, stream);
raft::update_device(eig_vals_ref.data(), eig_vals_ref_h, params.n_eigen_vals, stream);
auto cov_matrix_view = raft::make_device_matrix_view<const T, std::uint32_t, raft::col_major>(
cov_matrix.data(), params.n, params.n);
auto eig_vectors_view = raft::make_device_matrix_view<T, std::uint32_t, raft::col_major>(
eig_vectors.data(), params.n_eigen_vals, params.n);
auto eig_vals_view =
raft::make_device_vector_view<T, std::uint32_t>(eig_vals.data(), params.n_eigen_vals);
raft::linalg::eig_dc_selective(handle,
cov_matrix_view,
eig_vectors_view,
eig_vals_view,
static_cast<std::size_t>(params.n_eigen_vals),
EigVecMemUsage::OVERWRITE_INPUT);
resource::sync_stream(handle);
}
protected:
raft::resources handle;
cudaStream_t stream;
EigSelInputs<T> params;
rmm::device_uvector<T> cov_matrix;
rmm::device_uvector<T> eig_vectors;
rmm::device_uvector<T> eig_vectors_ref;
rmm::device_uvector<T> eig_vals;
rmm::device_uvector<T> eig_vals_ref;
};
const std::vector<EigSelInputs<float>> inputsf2 = {{0.001f, 4 * 4, 4, 3}};
const std::vector<EigSelInputs<double>> inputsd2 = {{0.001, 4 * 4, 4, 3}};
typedef EigSelTest<float> EigSelTestValF;
TEST_P(EigSelTestValF, Result)
{
ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
eig_vals.data(),
params.n_eigen_vals,
raft::CompareApproxAbs<float>(params.tolerance),
stream));
}
typedef EigSelTest<double> EigSelTestValD;
TEST_P(EigSelTestValD, Result)
{
ASSERT_TRUE(raft::devArrMatch(eig_vals_ref.data(),
eig_vals.data(),
params.n_eigen_vals,
raft::CompareApproxAbs<double>(params.tolerance),
stream));
}
typedef EigSelTest<float> EigSelTestVecF;
TEST_P(EigSelTestVecF, Result)
{
ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
eig_vectors.data(),
params.n_eigen_vals * params.n,
raft::CompareApproxAbs<float>(params.tolerance),
stream));
}
typedef EigSelTest<double> EigSelTestVecD;
TEST_P(EigSelTestVecD, Result)
{
ASSERT_TRUE(raft::devArrMatch(eig_vectors_ref.data(),
eig_vectors.data(),
params.n_eigen_vals * params.n,
raft::CompareApproxAbs<double>(params.tolerance),
stream));
}
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_SUITE_P(EigSelTest, EigSelTestVecD, ::testing::ValuesIn(inputsd2));
} // end namespace linalg
} // end namespace raft
#endif
|
b2f42cfbbf6ac66c2c10e4b2e27f8d2131a16321.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _MUMMERGPU_KERNEL_H_
#define _MUMMERGPU_KERNEL_H_
#include <stdio.h>
#include <common.cu>
#ifdef n__DEVICE_EMULATION__
#define XPRINTF(...) printf(__VA_ARGS__)
#define VERBOSE 0
#else
#define XPRINTF(...) do{}while(0)
#define VERBOSE 0
#endif
#define WARP_SIZE 16
#if REORDER_TREE
#define fNID "%d,%d"
#define NID(addr) (addr & 0x0000FFFF), ((addr & 0xFFFF0000)>>16)
#define GOROOT(addr) addr = 0x00010000
//#define GOROOT(addr) addr.x = 0; addr.y = 1
#else
#define fNID "%d"
#define NID(addr) addr
#define GOROOT(addr) addr = 1
#endif
#if COALESCED_QUERIES
#define GETQCHAR(qrypos) ((queries[((qrypos) >> 2) << 4]) & ((0xFF) << (((qrypos) & 0x00000003)) << 3)) >> ((((qrypos) & 0x00000003 )) << 3)
#elif QRYTEX
#define GETQCHAR(qrypos) tex1Dfetch(qrytex, qryAddr + qrypos)
#else
#define GETQCHAR(qrypos) queries[qrypos]
#endif
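// GETQCHAR above and the GETRCHAR / GETNODE / GETCHILDREN macros below resolve at compile
// time to texture fetches, coalesced packed-byte reads, or plain global-memory loads
// depending on the build flags, so the traversal code is written once against these macros.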
#if COALESCED_QUERIES
#define RESULT_SPAN WARP_SIZE
#define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + coordAddrs[qryid]
#else
#define RESULT_SPAN 1
#define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + qryAddr - __umul24(qryid, min_match_len + 1)
#endif
#if REFTEX
#define GETRCHAR(refpos) getRef(refpos)
#else
#define GETRCHAR(refpos) getRef(refpos, ref)
#endif
#if MERGETEX
#if TREE_ACCESS_HISTOGRAM
#if NODETEX
#define GETNODE(addr, two_level) getMerged(addr, two_level, 0, NULL, NULL)
#define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0, node_hist, child_hist)
#define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1, NULL, NULL)
#define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1, node_hist, child_hist)
#else
#define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, NULL, NULL)
#define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, node_hist, child_hist)
#define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, NULL, NULL)
#define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, node_hist, child_hist)
#endif
#else
#if NODETEX
#define GETNODE(addr, two_level) getMerged(addr, two_level, 0)
#define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0)
#define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1)
#define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1)
#else
#define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0)
#define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0)
#define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1)
#define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1)
#endif
#endif
#else
#if NODETEX
#if TREE_ACCESS_HISTOGRAM
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, node_hist)
#define GETNODE(addr, two_level) getNode(addr, two_level, NULL)
#else
#define GETNODEHIST(addr, two_level) getNode(addr, two_level)
#define GETNODE(addr, two_level) getNode(addr, two_level)
#endif
#else
#if TREE_ACCESS_HISTOGRAM
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes, node_hist)
#define GETNODE(addr, two_level) getNode(addr, two_level, nodes, NULL)
#else
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes)
#define GETNODE(addr, two_level) getNode(addr, two_level, nodes)
#endif
#endif
#if CHILDTEX
#if TREE_ACCESS_HISTOGRAM
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, child_hist)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, NULL)
#else
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level)
#endif
#else
#if TREE_ACCESS_HISTOGRAM
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr, child_hist)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr, NULL)
#else
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr)
#endif
#endif
#endif
#if QRYTEX
#define SHIFT_QUERIES(queries, qryAddr)
#else
#define SHIFT_QUERIES(queries, qryAddr) queries += qryAddr
#endif
#if REORDER_TREE
texture<uint4, 2, hipReadModeElementType> nodetex;
texture<uint4, 2, hipReadModeElementType> childrentex;
#else
texture<uint4, 1, hipReadModeElementType> nodetex;
texture<uint4, 1, hipReadModeElementType> childrentex;
#endif
#if REORDER_REF
texture<char, 2, hipReadModeElementType> reftex;
#else
texture<char, 1, hipReadModeElementType> reftex;
#endif
texture<char, 1, hipReadModeElementType> qrytex;
struct __align__(8) _MatchCoord
{
union
{
int2 data;
struct
{
int node; // match node
int edge_match_length; // number of matching characters UP the parent edge
};
};
};
// If leafchar is 0, store the ACGT$ links, else store the leafid
struct _PixelOfChildren
{
union
{
uint4 data;
union
{
struct
{
uchar3 a;
uchar3 c;
uchar3 g;
uchar3 t;
uchar3 d;
char leafchar;
};
struct
{
uchar3 leafid;
unsigned char pad [12];
char leafchar0;
};
};
};
};
// Store the start, end coordinate of node, and the parent, suffix links
struct _PixelOfNode
{
union
{
uint4 data;
struct
{
uchar3 parent;
uchar3 suffix;
uchar3 start;
uchar3 end;
uchar3 depth;
unsigned char pad;
};
};
};
#if TWO_LEVEL_CHILD_TREE
#define CHILD_THRESH 128
__constant__ _PixelOfChildren child_tree_top[CHILD_THRESH];
#endif
#if TWO_LEVEL_NODE_TREE
#define NODE_THRESH 128
__constant__ _PixelOfNode node_tree_top[NODE_THRESH];
#endif
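// With the TWO_LEVEL_* options enabled, the first NODE_THRESH/CHILD_THRESH
// tree entries are mirrored into the constant-memory arrays above;
// getNode()/getChildren() below serve those small ids from constant memory
// and fall back to texture/global memory for everything else.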
////////////////////////////////////////////////////////////////////
//////////////////////////////////
/// addr2id
//////////////////////////////////
__device__ int addr2id(unsigned int addr)
{
#if MERGETEX & REORDER_TREE
addr |= (((addr & 0x800) << 1) << 16);
addr &= 0xFFFF07FF;
int blocky = (addr >> 16) & 0x1F;
int bigy = (addr >> 16) >> 5;
int bigx = ((addr & 0x0000FFFF) << 5) + blocky;
return bigx + (bigy << 16);
#elif REORDER_TREE
int blocky = (addr >> 16) & 0x1F;
int bigy = (addr >> 16) >> 5;
int bigx = ((addr & 0x0000FFFF) << 5) + blocky;
return bigx + (bigy << 17);
#elif MERGETEX
return addr;
#else
return addr;
#endif
}
__device__ TextureAddress id2addr(int id)
{
TextureAddress retval;
#if MERGETEX & REORDER_TREE
// Half width is 2048 => 11 bits
// TEXBLOCKSIZE is 32 => 5 bits
int bigx = id & 0xFFFF; // 11 + 5 bits
int bigy = id >> 16;
retval.y = (bigy << 5) + (bigx & 0x1F);
retval.x = bigx >> 5;
// now stuff y's 13th bit into x's 12th bit
retval.x |= (retval.y & 0x1000) >> 1;
retval.y &= 0xFFF;
#elif REORDER_TREE
int bigx = id & 0x1FFFF;
int bigy = id >> 17;
retval.y = (bigy << 5) + (bigx & 0x1F);
retval.x = bigx >> 5;
#elif MERGETEX
retval.x = id;
#else
retval.x = id;
#endif
return retval;
}
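// Illustrative round trip (assuming MERGETEX & REORDER_TREE):
//   id2addr(33): bigx = 33, bigy = 0 -> (x, y) = (33 >> 5, 33 & 0x1F) = (1, 1)
//   addr2id((1,1)): blocky = 1, bigy = 0, bigx = (1 << 5) + 1 = 33 -> id = 33
// i.e. 32 consecutive ids fill one 32-texel column of the 2D texture before
// moving to the next x, and the two functions are inverses for in-range ids.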
#define MKI(uc3) (uc3.x | (uc3.y << 8) | (uc3.z << 16))
//////////////////////////////////
/// arrayToAddress
//////////////////////////////////
__device__ void arrayToAddress(uchar3 arr, unsigned int& addr)
{
#if REORDER_TREE
addr = (arr.x | ((arr.z & 0xF) << 8)) | ((arr.y | ((arr.z & 0xF0) << 4)) << 16);
#else
addr = MKI(arr);
#endif
}
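// Packing sketch: each uchar3 holds a 24-bit node reference.
//   REORDER_TREE: arr.x plus the low nibble of arr.z form the 12-bit x
//                 coordinate, arr.y plus the high nibble of arr.z the 12-bit y,
//                 e.g. {0xAB, 0xCD, 0x21} -> addr = 0x02CD01AB (x=0x1AB, y=0x2CD)
//   otherwise:    MKI() packs the bytes little-endian, e.g. -> 0x0021CDAB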
//////////////////////////////////
/// getRef
//////////////////////////////////
__device__ char getRef(int refpos
#if !REFTEX
,char* ref
#endif
)
{
#if REORDER_REF
int bigx = refpos & 0x3FFFF;
int bigy = refpos >> 18;
int y = (bigy << 2) + (bigx & 0x3);
int x = bigx >> 2;
#if REFTEX
return tex2D(reftex, x, y);
#else
return *(ref + 65536 * y + x);
#endif
#else
#if REFTEX
return tex1Dfetch(reftex, refpos);
#else
return ref[refpos];
#endif
#endif
}
//////////////////////////////////
/// RC
//////////////////////////////////
__device__ char rc(char c)
{
switch(c)
{
case 'A': return 'T';
case 'C': return 'G';
case 'G': return 'C';
case 'T': return 'A';
case 'q': return '\0';
default: return c;
};
}
//////////////////////////////////
/// getNode
//////////////////////////////////
__device__ uint4 getNode(unsigned int cur,
bool use_two_level
#if !NODETEX
, _PixelOfNode* nodes
#endif
#if TREE_ACCESS_HISTOGRAM
, int* node_hist
#endif
)
{
#if TREE_ACCESS_HISTOGRAM
int id = addr2id(cur);
if (node_hist) { node_hist[id]++; }
#endif
#if TWO_LEVEL_NODE_TREE
int id = addr2id(cur);
if (use_two_level && id < NODE_THRESH) { return node_tree_top[id].data; }
#endif
#if NODETEX
#if REORDER_TREE
return tex2D(nodetex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16);
#else
return tex1Dfetch(nodetex, cur);
#endif
#else
#if REORDER_TREE
return (nodes + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data;
#else
return (nodes + cur)->data;
#endif
#endif
}
//////////////////////////////////
/// getChildren
//////////////////////////////////
__device__ uint4 getChildren(unsigned int cur,
bool use_two_level
#if !CHILDTEX
, _PixelOfChildren* childrenarr
#endif
#if TREE_ACCESS_HISTOGRAM
, int* child_hist
#endif
)
{
#if TREE_ACCESS_HISTOGRAM
int id = addr2id(cur);
if (child_hist) { child_hist[id]++; }
#endif
#if TWO_LEVEL_CHILD_TREE
int id = addr2id(cur);
if (id < CHILD_THRESH) { return child_tree_top[id].data; }
#endif
#if CHILDTEX
#if REORDER_TREE
return tex2D(childrentex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16);
#else
return tex1Dfetch(childrentex, cur);
#endif
#else
#if REORDER_TREE
return (childrenarr + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data;
#else
return (childrenarr + cur)->data;
#endif
#endif
}
#if MERGETEX
//////////////////////////////////
/// getMerged
//////////////////////////////////
__device__ uint4 getMerged(
#if !NODETEX
_PixelOfNode * nodes,
_PixelOfChildren * childrenarr,
#endif
unsigned int cur,
int use_two_level,
int getChildrenData
#if TREE_ACCESS_HISTOGRAM
, int* node_hist
, int* child_hist
#endif
)
{
// TextureAddress cur = _cur;
#if !REORDER_TREE
//cur.x *= 2;
unsigned int x = cur * 2;
int useChildrenForData = 0;
if (x >= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION)
{
x -= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION;
useChildrenForData = 1;
}
#else
unsigned short x = cur & 0x0000FFFF;
unsigned short y = (cur & 0xFFFF0000) >> 16;
int useChildrenForData = 0;
// WARNING INSANE HACK TO WORK AROUND NVCC BUG
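// The goto chain below is equivalent to the straight-line code
//     if (x >= 2048) { useChildrenForData = 1; }
//     x &= 0x7FF;
//     x *= 2;
// i.e. x coordinates >= 2048 appear to select the children texture of the
// merged layout, and the surviving 11 bits are doubled so that the node and
// children records sit in adjacent columns, chosen via getChildrenData below.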
goto TEST;
MASK:
x &= 0x7FF;
x *= 2;
goto INC;
TEST:
if (x >= 2048)
{
useChildrenForData = 1;
}
goto MASK;
INC:
#endif
x += getChildrenData;
#if !REORDER_TREE
cur = x;
#else
cur = (y << 16) | x;
#endif
if (useChildrenForData)
{
return getChildren(cur, use_two_level
#if !CHILDTEX
, childrenarr
#endif
#if TREE_ACCESS_HISTOGRAM
, child_hist
#endif
);
}
else
{
return getNode(cur, use_two_level
#if !NODETEX
, nodes
#endif
#if TREE_ACCESS_HISTOGRAM
, node_hist
#endif
);
}
}
#endif
//////////////////////////////////
/// printNode, Emulator only
//////////////////////////////////
#if VERBOSE
#if CHILDTEX && NODETEX
#define PRINTNODE(id) printNode(id)
#define PRINTNODES(s,e) printNodes(s,e)
#elif CHILDTEX
#define PRINTNODE(id) printNode(id, nodes)
#define PRINTNODES(s,e) printNodes(s, e, nodes)
#elif NODETEX
#define PRINTNODE(id) printNode(id, childrenarr)
#define PRINTNODES(s,e) printNodes(s, e, childrenarr)
#else
#define PRINTNODE(id) printNode(id, nodes, childrenarr)
#define PRINTNODES(s,e) printNodes(s, e, nodes, childrenarr)
#endif
__device__ void printNode(int nodeid
#if !NODETEX
, _PixelOfNode* nodes
#endif
#if !CHILDTEX
, _PixelOfChildren* childrenarr
#endif
)
{
TextureAddress addr = id2addr(nodeid);
_PixelOfNode nd;
nd.data = GETNODE(addr.data, false);
_PixelOfChildren cd;
cd.data = GETCHILDREN(addr.data, false);
unsigned int a; arrayToAddress(cd.a, a);
unsigned int c; arrayToAddress(cd.c, c);
unsigned int g; arrayToAddress(cd.g, g);
unsigned int t; arrayToAddress(cd.t, t);
unsigned int d; arrayToAddress(cd.d, d);
unsigned int p; arrayToAddress(nd.parent, p);
unsigned int s; arrayToAddress(nd.suffix, s);
int start = MKI(nd.start);
int end = MKI(nd.end);
int depth = MKI(nd.depth);
char leafchar = cd.leafchar;
XPRINTF("%d\t"fNID"\t%d\t%d\t%d\t%d\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\n",
nodeid, NID(addr), start, end, depth, leafchar,
NID(a), NID(c), NID(g), NID(t), NID(d), NID(p), NID(s));
}
__device__ void printNodes(int start, int end
#if !NODETEX
, _PixelOfNode * nodes
#endif
#if !CHILDTEX
,_PixelOfChildren * childrenarr
#endif
)
{
XPRINTF("id\taddr\tstart\tend\tdepth\tleaf\ta\tc\tg\tt\t$\tp\ts\n");
for (int i = start; i <= end; i++)
{
PRINTNODE(i);
}
}
#else // !VERBOSE
#define PRINTNODE(id)
#define PRINTNODES(s,e)
#endif
#if VERBOSE
#if NODETEX && CHILDTEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc)
#elif NODETEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, childrenarr)
#elif CHILDTEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes)
#else
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes, childrenarr)
#endif
#else
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc)
#endif
//////////////////////////////////
/// set_result
//////////////////////////////////
__device__ void set_result(unsigned int cur,
_MatchCoord* result,
int edge_match_length,
int qry_match_len,
int min_match_len,
int rc
#if VERBOSE
#if !NODETEX
, _PixelOfNode * nodes
#endif
#if !CHILDTEX
, _PixelOfChildren * childrenarr
#endif
#endif
)
{
if (qry_match_len > min_match_len)
{
edge_match_length |= rc;
result->data = make_int2(cur, edge_match_length);
#if VERBOSE
_PixelOfNode nd; nd.data = GETNODE(cur, false);
XPRINTF(" saving match cur=%d "fNID" len=%d edge_match=%d depth=%d\n",
result->data.x, NID(cur), qry_match_len, edge_match_length, MKI(nd.depth));
#endif
}
else
{
XPRINTF(" match too short (%d < %d)\n", qry_match_len, min_match_len);
}
}
/////////////////////////////////////
// Compute forward substring matches
/////////////////////////////////////
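// Rough flow, as read from the code below: for each query suffix, walk the
// suffix tree from the root (or from wherever the previous suffix link left
// us), follow the child link selected by the next query character, and
// compare characters along that edge against the reference; on a mismatch or
// end of query, record the deepest match via SET_RESULT, then follow the
// suffix link so the next suffix can reuse most of the current match.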
__global__ void
mummergpuKernel(void* match_coords,
#if COALESCED_QUERIES
int* coordAddrs,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
int* queries,
#else
char* queries,
#endif
#endif
#if !NODETEX
_PixelOfNode* nodes,
#endif
#if !CHILDTEX
_PixelOfChildren* childrenarr,
#endif
#if !REFTEX
char* ref,
#endif
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len
#if TREE_ACCESS_HISTOGRAM
,int* node_hist,
int* child_hist
#endif
)
{
int qryid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
XPRINTF("> qryid: %d\n", qryid);
if (qryid == 0)
{
PRINTNODES(0,200);
}
int qlen = queryLengths[qryid];
int qryAddr = queryAddrs[qryid];
//TextureAddress cur;
unsigned int cur = 0;
//cur.data = 0;
int mustmatch = 0;
int qry_match_len = 0;
_MatchCoord * result = MATCH_BASE(match_coords, qryid);
SHIFT_QUERIES(queries, qryAddr);
int last = qlen - min_match_len;
for (int qrystart = 0;
qrystart <= last;
qrystart++,
result += RESULT_SPAN)
{
//_PixelOfNode node;
unsigned int node_start;
unsigned int prev;
if ((cur == 0) || (qry_match_len < 1))
{
// start at root of tree
GOROOT(cur);
qry_match_len = 1;
mustmatch = 0;
}
char c = GETQCHAR(qrystart + qry_match_len);
XPRINTF("In node ("fNID"): starting with %c [%d] => \n",
NID(cur), c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
_PixelOfChildren children;
children.data = GETCHILDRENHIST(cur, false);
prev = cur;
uchar3 next;
switch (c)
{
case 'A': next = children.a; break;
case 'C': next = children.c; break;
case 'G': next = children.g; break;
case 'T': next = children.t; break;
default: next = make_uchar3(0,0,0); break;
};
arrayToAddress(next, cur);
XPRINTF(" In node: ("fNID")\n", NID(cur));
// No edge to follow out of the node
if (cur == 0) {
XPRINTF(" no edge\n");
SET_RESULT(prev, result, 0, qry_match_len, min_match_len, FORWARD);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
_PixelOfNode node;
node.data = GETNODEHIST(cur, true);
node_start = MKI(node.start);
unsigned int node_end = MKI(node.end);
XPRINTF(" Edge coordinates: %d - %d\n", node_start, node_end);
{
int edgelen = node_end - node_start + 1;
int edge_matchlen = node_start + mustmatch;
int past_node_end = node_end + 1;
int dist_to_edge_end = mustmatch - edgelen;
if (mustmatch) {
refpos = min(edge_matchlen, past_node_end);
qry_match_len += min(edgelen, mustmatch);
mustmatch = max(dist_to_edge_end, 0);
}
else {
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = node_start + 1;
}
}
c = GETQCHAR(qrystart + qry_match_len);
while (refpos <= node_end && c != '\0')
{
char r = GETRCHAR(refpos);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len, refpos - (node_start));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = GETQCHAR(qrystart + qry_match_len);
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
{
//_PixelOfNode node;
//node.data = getnodehist(cur, false);
SET_RESULT(cur, result, refpos - node_start, qry_match_len,
min_match_len, FORWARD);
mustmatch = refpos - node_start;
qry_match_len -= mustmatch + 1;
}
NEXT_SUBSTRING:
{
_PixelOfNode node;
node.data = GETNODEHIST(prev, false);
arrayToAddress(node.suffix, cur);
}
//XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:("fNID")\n",
// mustmatch, qry_match_len, NID(cur));
do {} while (0);
}
return;
}
///////////////////////////////////////
//// Compute reverse substring matches
///////////////////////////////////////
__global__ void
mummergpuRCKernel(MatchCoord* match_coords,
char* queries,
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len)
{
/*
int qryid = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
int qlen = queryLengths[qryid];
XPRINTF("> rc qryid: %d\n", qryid);
queries++; // skip the 'q' character
// start at root for first query character
TextureAddress cur;
int mustmatch = 0;
int qry_match_len = 0;
int qryAddr=queryAddrs[qryid];
MatchCoord * result = match_coords + qryAddr - __umul24(qryid, min_match_len + 1);
queries += qryAddr;
for (int qrystart = qlen;
qrystart >= min_match_len ;
qrystart--, result++)
{
#if VERBOSE
queries[qrystart] = '\0';
XPRINTF("qry: ", queries);
for (int j = qrystart-1; j >= 0; j--)
{ XPRINTF("%c", rc(queries[j])); }
XPRINTF("\n");
#endif
_PixelOfNode node;
TextureAddress prev;
if (((cur.data == 0)) || (qry_match_len < 1))
{
// start at root of tree
cur.x = 0; cur.y = 1;
qry_match_len = 1;
mustmatch = 0;
}
char c = rc(queries[qrystart-qry_match_len]);
XPRINTF("In node (%d,%d): starting with %c [%d] => \n", cur.x, cur.y, c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
_PixelOfChildren children;
children.data = tex2D(childrentex,cur.x, cur.y);
prev = cur;
switch(c)
{
case 'A': cur=children.children[0]; break;
case 'C': cur=children.children[1]; break;
case 'G': cur=children.children[2]; break;
case 'T': cur=children.children[3]; break;
default: cur.data = 0; break;
};
XPRINTF(" In node: (%d,%d)\n", cur.x, cur.y);
// No edge to follow out of the node
if (cur.data == 0)
{
XPRINTF(" no edge\n");
SET_RESULT(prev, (_MatchCoord*)result, 0, qry_match_len, min_match_len,
REVERSE);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
{
node.data = tex2D(nodetex, cur.data & 0xFFFF, cur.data >> 16);
}
XPRINTF(" Edge coordinates: %d - %d\n", MKI(node.start), MKI(node.end));
if (mustmatch)
{
int edgelen = MKI(node.end) - MKI(node.start)+1;
if (mustmatch >= edgelen)
{
XPRINTF(" mustmatch(%d) >= edgelen(%d), skipping edge\n", mustmatch, edgelen);
refpos = MKI(node.end)+1;
qry_match_len += edgelen;
mustmatch -= edgelen;
}
else
{
XPRINTF(" mustmatch(%d) < edgelen(%d), skipping to:%d\n",
mustmatch, edgelen, MKI(node.start)+mustmatch);
qry_match_len += mustmatch;
refpos = MKI(node.start) + mustmatch;
mustmatch = 0;
}
}
else
{
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = MKI(node.start)+1;
}
c = rc(queries[qrystart-qry_match_len]);
while (refpos <= MKI(node.end) && c != '\0')
{
char r = getRef(refpos
#if !REFTEX
//FIXME: this needs to be a pointer to ref->d_ref_array
,NULL
#endif
);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len,refpos - (MKI(node.start)));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = rc(queries[qrystart-qry_match_len]);
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
SET_RESULT(cur, (_MatchCoord*)result, refpos - MKI(node.start), qry_match_len,
min_match_len, REVERSE);
mustmatch = refpos - MKI(node.start);
qry_match_len -= mustmatch + 1;
NEXT_SUBSTRING:
node.data = tex2D(nodetex, prev.x, prev.y);
cur = node.suffix;
XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:(%d,%d)\n",
mustmatch, qry_match_len, cur.x, cur.y);
do {} while(0);
}
*/
return;
}
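//////////////////////////////////
/// printKernel
//////////////////////////////////
// Rough flow, as read from the code below: climb parent links from the match
// node until the node depth drops to min_match_length, then do an explicit
// depth-first walk of that subtree; every leaf whose stored flanking
// character differs from the query's flanking base is treated as a
// left-maximal match and written to the alignments array. The "golden path"
// bookkeeping keeps matchlen correct while moving off and back onto the
// original match path.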
__global__ void
printKernel(MatchInfo * matches,
int totalMatches,
Alignment * alignments,
#if !QRYTEX
#if COALESCED_QUERIES
int * queries,
#else
char * queries,
#endif
#endif
#if !NODETEX
_PixelOfNode* nodes,
#endif
#if !CHILDTEX
_PixelOfChildren* childrenarr,
#endif
const int * queryAddrs,
const int * queryLengths,
const int page_begin,
const int page_end,
const int page_shadow_left,
const int page_shadow_right,
const int min_match_length
#if TREE_ACCESS_HISTOGRAM
,int* node_hist,
int* child_hist
#endif
)
{
int matchid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (matchid >= totalMatches) { return; }
XPRINTF(">matchid: %d qry: %d\n", matchid, matches[matchid].queryid);
int qryAddr = queryAddrs[matches[matchid].queryid];
SHIFT_QUERIES(queries, qryAddr);
#if !QRYTEX
XPRINTF("query: %s\n", queries);
#endif
char queryflankingbase = GETQCHAR(matches[matchid].qrystartpos);
// Find the top node to start printing from
unsigned int matchaddr = matches[matchid].matchnode.data;
unsigned int cur = matchaddr;
unsigned int printParent = cur;
_PixelOfNode node;
node.data = GETNODE(cur, true);
XPRINTF("starting node: %d "fNID" depth: %d\n", matches[matchid].matchnode, NID(cur), MKI(node.depth));
while (MKI(node.depth) > min_match_length)
{
printParent = cur;
arrayToAddress(node.parent, cur);
node.data = GETNODE(cur, true);
XPRINTF("par: "fNID" depth: %d\n", NID(cur), MKI(node.depth));
}
// traverse the tree starting at printParent
unsigned int badParent = cur;
cur = printParent;
XPRINTF(" printParent: "fNID"\n", NID(printParent));
char curchild = 'A';
bool forceToParent = false;
node.data = GETNODE(printParent, true);
int matchlen = MKI(node.depth) - 1;
int depthToGoldenPath = 0;
int matchnum = matches[matchid].resultsoffset;
// If the printparent is the matchnode, then we are already off the golden path
if (printParent == matchaddr)
{
if (matches[matchid].edgematch > 0)
{
node.data = GETNODE(badParent, true);
matchlen = MKI(node.depth)-1+matches[matchid].edgematch;
}
depthToGoldenPath = 1;
}
// keep going until I hit the printParent's parent
while (cur != badParent)
{
_PixelOfChildren children;
children.data = GETCHILDREN(cur, true);
char isLeaf = children.leafchar;
XPRINTF(" cur: "fNID" curchild: %c isLeaf:%d forceToParent:%d\n",
NID(cur), curchild, isLeaf, forceToParent);
if (isLeaf || forceToParent)
{
// See if I am left maximal and print
if (isLeaf && isLeaf != queryflankingbase)
{
int leafid = MKI(children.leafid);
int left_in_ref = (leafid - 1) + page_begin;
int right_in_ref = left_in_ref + matchlen;
if ((left_in_ref != page_begin || page_shadow_left == -1) &&
(right_in_ref != page_end || page_shadow_right == -1))
{
if (!(left_in_ref > page_begin && right_in_ref < page_shadow_left))
{
//sprintf(buf, "%8d%10d%10d\n", left_in_ref, qrystartpos+1, matchlen);
XPRINTF("%8d%10d%10d\n",
left_in_ref,
matches[matchid].qrystartpos+1,
matchlen);
alignments[matchnum].left_in_ref = left_in_ref;
alignments[matchnum].matchlen = matchlen;
matchnum++;
}
}
}
forceToParent = false;
// now return to my parent and advance curchild
node.data = GETNODE(cur, true);
unsigned int myParent;
arrayToAddress(node.parent, myParent);
_PixelOfChildren pchildren;
pchildren.data = GETCHILDREN(myParent, true);
unsigned int pa, pc, pg, pt;
arrayToAddress(pchildren.a, pa);
arrayToAddress(pchildren.c, pc);
arrayToAddress(pchildren.g, pg);
arrayToAddress(pchildren.t, pt);
if (pa == cur) { curchild = 'C'; }
else if (pc == cur) { curchild = 'G'; }
else if (pg == cur) { curchild = 'T'; }
else if (pt == cur) { curchild = '$'; }
else // I must be the $ child, go up a level
{
forceToParent = true;
}
cur = myParent;
if (depthToGoldenPath) { depthToGoldenPath--; }
if (depthToGoldenPath == 0)
{
node.data = GETNODE(cur, true);
matchlen = MKI(node.depth)-1;
}
}
else
{
// try to walk down the tree
_PixelOfChildren children;
children.data = GETCHILDREN(cur, true);
char goldenChild = 0;
if (depthToGoldenPath == 0)
{
// we are currently on the golden path
// one of the children is also on the golden path
goldenChild = GETQCHAR(matches[matchid].qrystartpos+matchlen+1);
}
do
{
if (curchild == 'A')
{
if (children.a.x || children.a.y || children.a.z)
{
XPRINTF(" -> A\n");
arrayToAddress(children.a, cur);
break;
}
curchild = 'C';
}
if (curchild == 'C')
{
if (children.c.x || children.c.y || children.c.z)
{
XPRINTF(" -> C\n");
arrayToAddress(children.c, cur);
break;
}
curchild = 'G';
}
if (curchild == 'G')
{
if (children.g.x || children.g.y || children.g.z)
{
XPRINTF(" -> G\n");
arrayToAddress(children.g, cur);
break;
}
curchild = 'T';
}
if (curchild == 'T')
{
if (children.t.x || children.t.y || children.t.z)
{
XPRINTF(" -> T\n");
arrayToAddress(children.t, cur);
break;
}
curchild = '$';
}
if (curchild == '$')
{
if (children.d.x || children.d.y || children.d.z)
{
XPRINTF(" -> $\n");
arrayToAddress(children.d, cur);
break;
}
}
// checked all of the children, go back to parent
forceToParent = true;
}
while (0);
if (!forceToParent)
{
if (depthToGoldenPath == 0)
{
if (curchild == goldenChild)
{
node.data = GETNODE(cur, true);
matchlen = MKI(node.depth)-1;
if (cur == matchaddr)
{
// we overextended the golden path
depthToGoldenPath = 1;
if (matches[matchid].edgematch > 0)
{
unsigned int par;
arrayToAddress(node.parent, par);
node.data = GETNODE(par, true);
matchlen = MKI(node.depth) - 1 + matches[matchid].edgematch;
}
}
}
else
{
depthToGoldenPath = 1;
}
}
else
{
depthToGoldenPath++;
}
curchild = 'A';
}
}
}
}
#endif // #ifndef _MUMMERGPU_HH_
| b2f42cfbbf6ac66c2c10e4b2e27f8d2131a16321.cu | #ifndef _MUMMERGPU_KERNEL_H_
#define _MUMMERGPU_KERNEL_H_
#include <stdio.h>
#include <common.cu>
#ifdef n__DEVICE_EMULATION__
#define XPRINTF(...) printf(__VA_ARGS__)
#define VERBOSE 0
#else
#define XPRINTF(...) do{}while(0)
#define VERBOSE 0
#endif
#define WARP_SIZE 16
#if REORDER_TREE
#define fNID "%d,%d"
#define NID(addr) (addr & 0x0000FFFF), ((addr & 0xFFFF0000)>>16)
#define GOROOT(addr) addr = 0x00010000
//#define GOROOT(addr) addr.x = 0; addr.y = 1
#else
#define fNID "%d"
#define NID(addr) addr
#define GOROOT(addr) addr = 1
#endif
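// With REORDER_TREE a 32-bit node address packs a 2D texture coordinate
// (x in the low 16 bits, y in the high 16 bits), so the root sits at
// (0,1) == 0x00010000; in the flat layout the root is simply index 1.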
#if COALESCED_QUERIES
#define GETQCHAR(qrypos) ((queries[((qrypos) >> 2) << 4]) & ((0xFF) << (((qrypos) & 0x00000003)) << 3)) >> ((((qrypos) & 0x00000003 )) << 3)
#elif QRYTEX
#define GETQCHAR(qrypos) tex1Dfetch(qrytex, qryAddr + qrypos)
#else
#define GETQCHAR(qrypos) queries[qrypos]
#endif
#if COALESCED_QUERIES
#define RESULT_SPAN WARP_SIZE
#define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + coordAddrs[qryid]
#else
#define RESULT_SPAN 1
#define MATCH_BASE(match_coords, qryid) (_MatchCoord*)match_coords + qryAddr - __umul24(qryid, min_match_len + 1)
#endif
#if REFTEX
#define GETRCHAR(refpos) getRef(refpos)
#else
#define GETRCHAR(refpos) getRef(refpos, ref)
#endif
#if MERGETEX
#if TREE_ACCESS_HISTOGRAM
#if NODETEX
#define GETNODE(addr, two_level) getMerged(addr, two_level, 0, NULL, NULL)
#define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0, node_hist, child_hist)
#define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1, NULL, NULL)
#define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1, node_hist, child_hist)
#else
#define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, NULL, NULL)
#define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0, node_hist, child_hist)
#define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, NULL, NULL)
#define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1, node_hist, child_hist)
#endif
#else
#if NODETEX
#define GETNODE(addr, two_level) getMerged(addr, two_level, 0)
#define GETNODEHIST(addr, two_level) getMerged(addr, two_level, 0)
#define GETCHILDREN(addr, two_level) getMerged(addr, two_level, 1)
#define GETCHILDRENHIST(addr, two_level) getMerged(addr, two_level, 1)
#else
#define GETNODE(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0)
#define GETNODEHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 0)
#define GETCHILDREN(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1)
#define GETCHILDRENHIST(addr, two_level) getMerged(nodes, childrenarr, addr, two_level, 1)
#endif
#endif
#else
#if NODETEX
#if TREE_ACCESS_HISTOGRAM
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, node_hist)
#define GETNODE(addr, two_level) getNode(addr, two_level, NULL)
#else
#define GETNODEHIST(addr, two_level) getNode(addr, two_level)
#define GETNODE(addr, two_level) getNode(addr, two_level)
#endif
#else
#if TREE_ACCESS_HISTOGRAM
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes, node_hist)
#define GETNODE(addr, two_level) getNode(addr, two_level, nodes, NULL)
#else
#define GETNODEHIST(addr, two_level) getNode(addr, two_level, nodes)
#define GETNODE(addr, two_level) getNode(addr, two_level, nodes)
#endif
#endif
#if CHILDTEX
#if TREE_ACCESS_HISTOGRAM
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, child_hist)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, NULL)
#else
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level)
#endif
#else
#if TREE_ACCESS_HISTOGRAM
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr, child_hist)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr, NULL)
#else
#define GETCHILDRENHIST(addr, two_level) getChildren(addr, two_level, childrenarr)
#define GETCHILDREN(addr, two_level) getChildren(addr, two_level, childrenarr)
#endif
#endif
#endif
#if QRYTEX
#define SHIFT_QUERIES(queries, qryAddr)
#else
#define SHIFT_QUERIES(queries, qryAddr) queries += qryAddr
#endif
#if REORDER_TREE
texture<uint4, 2, cudaReadModeElementType> nodetex;
texture<uint4, 2, cudaReadModeElementType> childrentex;
#else
texture<uint4, 1, cudaReadModeElementType> nodetex;
texture<uint4, 1, cudaReadModeElementType> childrentex;
#endif
#if REORDER_REF
texture<char, 2, cudaReadModeElementType> reftex;
#else
texture<char, 1, cudaReadModeElementType> reftex;
#endif
texture<char, 1, cudaReadModeElementType> qrytex;
struct __align__(8) _MatchCoord
{
union
{
int2 data;
struct
{
int node; // match node
int edge_match_length; // number of matching characters UP the parent edge
};
};
};
// If leafchar is 0, store the ACGT$ links, else store the leafid
struct _PixelOfChildren
{
union
{
uint4 data;
union
{
struct
{
uchar3 a;
uchar3 c;
uchar3 g;
uchar3 t;
uchar3 d;
char leafchar;
};
struct
{
uchar3 leafid;
unsigned char pad [12];
char leafchar0;
};
};
};
};
// Store the start, end coordinate of node, and the parent, suffix links
struct _PixelOfNode
{
union
{
uint4 data;
struct
{
uchar3 parent;
uchar3 suffix;
uchar3 start;
uchar3 end;
uchar3 depth;
unsigned char pad;
};
};
};
#if TWO_LEVEL_CHILD_TREE
#define CHILD_THRESH 128
__constant__ _PixelOfChildren child_tree_top[CHILD_THRESH];
#endif
#if TWO_LEVEL_NODE_TREE
#define NODE_THRESH 128
__constant__ _PixelOfNode node_tree_top[NODE_THRESH];
#endif
////////////////////////////////////////////////////////////////////
//////////////////////////////////
/// addr2id
//////////////////////////////////
__device__ int addr2id(unsigned int addr)
{
#if MERGETEX & REORDER_TREE
addr |= (((addr & 0x800) << 1) << 16);
addr &= 0xFFFF07FF;
int blocky = (addr >> 16) & 0x1F;
int bigy = (addr >> 16) >> 5;
int bigx = ((addr & 0x0000FFFF) << 5) + blocky;
return bigx + (bigy << 16);
#elif REORDER_TREE
int blocky = (addr >> 16) & 0x1F;
int bigy = (addr >> 16) >> 5;
int bigx = ((addr & 0x0000FFFF) << 5) + blocky;
return bigx + (bigy << 17);
#elif MERGETEX
return addr;
#else
return addr;
#endif
}
__device__ TextureAddress id2addr(int id)
{
TextureAddress retval;
#if MERGETEX & REORDER_TREE
// Half width is 2048 => 11 bits
// TEXBLOCKSIZE is 32 => 5 bits
int bigx = id & 0xFFFF; // 11 + 5 bits
int bigy = id >> 16;
retval.y = (bigy << 5) + (bigx & 0x1F);
retval.x = bigx >> 5;
// now stuff y's 13th bit into x's 12th bit
retval.x |= (retval.y & 0x1000) >> 1;
retval.y &= 0xFFF;
#elif REORDER_TREE
int bigx = id & 0x1FFFF;
int bigy = id >> 17;
retval.y = (bigy << 5) + (bigx & 0x1F);
retval.x = bigx >> 5;
#elif MERGETEX
retval.x = id;
#else
retval.x = id;
#endif
return retval;
}
#define MKI(uc3) (uc3.x | (uc3.y << 8) | (uc3.z << 16))
//////////////////////////////////
/// arrayToAddress
//////////////////////////////////
__device__ void arrayToAddress(uchar3 arr, unsigned int& addr)
{
#if REORDER_TREE
addr = (arr.x | ((arr.z & 0xF) << 8)) | ((arr.y | ((arr.z & 0xF0) << 4)) << 16);
#else
addr = MKI(arr);
#endif
}
//////////////////////////////////
/// getRef
//////////////////////////////////
__device__ char getRef(int refpos
#if !REFTEX
,char* ref
#endif
)
{
#if REORDER_REF
int bigx = refpos & 0x3FFFF;
int bigy = refpos >> 18;
int y = (bigy << 2) + (bigx & 0x3);
int x = bigx >> 2;
#if REFTEX
return tex2D(reftex, x, y);
#else
return *(ref + 65536 * y + x);
#endif
#else
#if REFTEX
return tex1Dfetch(reftex, refpos);
#else
return ref[refpos];
#endif
#endif
}
//////////////////////////////////
/// RC
//////////////////////////////////
__device__ char rc(char c)
{
switch(c)
{
case 'A': return 'T';
case 'C': return 'G';
case 'G': return 'C';
case 'T': return 'A';
case 'q': return '\0';
default: return c;
};
}
//////////////////////////////////
/// getNode
//////////////////////////////////
__device__ uint4 getNode(unsigned int cur,
bool use_two_level
#if !NODETEX
, _PixelOfNode* nodes
#endif
#if TREE_ACCESS_HISTOGRAM
, int* node_hist
#endif
)
{
#if TREE_ACCESS_HISTOGRAM
int id = addr2id(cur);
if (node_hist) { node_hist[id]++; }
#endif
#if TWO_LEVEL_NODE_TREE
int id = addr2id(cur);
if (use_two_level && id < NODE_THRESH) { return node_tree_top[id].data; }
#endif
#if NODETEX
#if REORDER_TREE
return tex2D(nodetex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16);
#else
return tex1Dfetch(nodetex, cur);
#endif
#else
#if REORDER_TREE
return (nodes + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data;
#else
return (nodes + cur)->data;
#endif
#endif
}
//////////////////////////////////
/// getChildren
//////////////////////////////////
__device__ uint4 getChildren(unsigned int cur,
bool use_two_level
#if !CHILDTEX
, _PixelOfChildren* childrenarr
#endif
#if TREE_ACCESS_HISTOGRAM
, int* child_hist
#endif
)
{
#if TREE_ACCESS_HISTOGRAM
int id = addr2id(cur);
if (child_hist) { child_hist[id]++; }
#endif
#if TWO_LEVEL_CHILD_TREE
int id = addr2id(cur);
if (id < CHILD_THRESH) { return child_tree_top[id].data; }
#endif
#if CHILDTEX
#if REORDER_TREE
return tex2D(childrentex, cur & 0x0000FFFF, (cur & 0xFFFF0000) >> 16);
#else
return tex1Dfetch(childrentex, cur);
#endif
#else
#if REORDER_TREE
return (childrenarr + (cur & 0x0000FFFF) + (((cur & 0xFFFF0000)>>16) * MAX_TEXTURE_DIMENSION))->data;
#else
return (childrenarr + cur)->data;
#endif
#endif
}
#if MERGETEX
//////////////////////////////////
/// getMerged
//////////////////////////////////
__device__ uint4 getMerged(
#if !NODETEX
_PixelOfNode * nodes,
_PixelOfChildren * childrenarr,
#endif
unsigned int cur,
int use_two_level,
int getChildrenData
#if TREE_ACCESS_HISTOGRAM
, int* node_hist
, int* child_hist
#endif
)
{
// TextureAddress cur = _cur;
#if !REORDER_TREE
//cur.x *= 2;
unsigned int x = cur * 2;
int useChildrenForData = 0;
if (x >= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION)
{
x -= MAX_TEXTURE_DIMENSION*MAX_TEXTURE_DIMENSION;
useChildrenForData = 1;
}
#else
unsigned short x = cur & 0x0000FFFF;
unsigned short y = (cur & 0xFFFF0000) >> 16;
int useChildrenForData = 0;
// WARNING INSANE HACK TO WORK AROUND NVCC BUG
goto TEST;
MASK:
x &= 0x7FF;
x *= 2;
goto INC;
TEST:
if (x >= 2048)
{
useChildrenForData = 1;
}
goto MASK;
INC:
#endif
x += getChildrenData;
#if !REORDER_TREE
cur = x;
#else
cur = (y << 16) | x;
#endif
if (useChildrenForData)
{
return getChildren(cur, use_two_level
#if !CHILDTEX
, childrenarr
#endif
#if TREE_ACCESS_HISTOGRAM
, child_hist
#endif
);
}
else
{
return getNode(cur, use_two_level
#if !NODETEX
, nodes
#endif
#if TREE_ACCESS_HISTOGRAM
, node_hist
#endif
);
}
}
#endif
//////////////////////////////////
/// printNode, Emulator only
//////////////////////////////////
#if VERBOSE
#if CHILDTEX && NODETEX
#define PRINTNODE(id) printNode(id)
#define PRINTNODES(s,e) printNodes(s,e)
#elif CHILDTEX
#define PRINTNODE(id) printNode(id, nodes)
#define PRINTNODES(s,e) printNodes(s, e, nodes)
#elif NODETEX
#define PRINTNODE(id) printNode(id, childrenarr)
#define PRINTNODES(s,e) printNodes(s, e, childrenarr)
#else
#define PRINTNODE(id) printNode(id, nodes, childrenarr)
#define PRINTNODES(s,e) printNodes(s, e, nodes, childrenarr)
#endif
__device__ void printNode(int nodeid
#if !NODETEX
, _PixelOfNode* nodes
#endif
#if !CHILDTEX
, _PixelOfChildren* childrenarr
#endif
)
{
TextureAddress addr = id2addr(nodeid);
_PixelOfNode nd;
nd.data = GETNODE(addr.data, false);
_PixelOfChildren cd;
cd.data = GETCHILDREN(addr.data, false);
unsigned int a; arrayToAddress(cd.a, a);
unsigned int c; arrayToAddress(cd.c, c);
unsigned int g; arrayToAddress(cd.g, g);
unsigned int t; arrayToAddress(cd.t, t);
unsigned int d; arrayToAddress(cd.d, d);
unsigned int p; arrayToAddress(nd.parent, p);
unsigned int s; arrayToAddress(nd.suffix, s);
int start = MKI(nd.start);
int end = MKI(nd.end);
int depth = MKI(nd.depth);
char leafchar = cd.leafchar;
XPRINTF("%d\t"fNID"\t%d\t%d\t%d\t%d\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\t"fNID"\n",
nodeid, NID(addr), start, end, depth, leafchar,
NID(a), NID(c), NID(g), NID(t), NID(d), NID(p), NID(s));
}
__device__ void printNodes(int start, int end
#if !NODETEX
, _PixelOfNode * nodes
#endif
#if !CHILDTEX
,_PixelOfChildren * childrenarr
#endif
)
{
XPRINTF("id\taddr\tstart\tend\tdepth\tleaf\ta\tc\tg\tt\t$\tp\ts\n");
for (int i = start; i <= end; i++)
{
PRINTNODE(i);
}
}
#else // !VERBOSE
#define PRINTNODE(id)
#define PRINTNODES(s,e)
#endif
#if VERBOSE
#if NODETEX && CHILDTEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc)
#elif NODETEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, childrenarr)
#elif CHILDTEX
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes)
#else
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc, nodes, childrenarr)
#endif
#else
#define SET_RESULT(c, r, e, q, m, rc) set_result(c, r, e, q, m, rc)
#endif
//////////////////////////////////
/// set_result
//////////////////////////////////
__device__ void set_result(unsigned int cur,
_MatchCoord* result,
int edge_match_length,
int qry_match_len,
int min_match_len,
int rc
#if VERBOSE
#if !NODETEX
, _PixelOfNode * nodes
#endif
#if !CHILDTEX
, _PixelOfChildren * childrenarr
#endif
#endif
)
{
if (qry_match_len > min_match_len)
{
edge_match_length |= rc;
result->data = make_int2(cur, edge_match_length);
#if VERBOSE
_PixelOfNode nd; nd.data = GETNODE(cur, false);
XPRINTF(" saving match cur=%d "fNID" len=%d edge_match=%d depth=%d\n",
result->data.x, NID(cur), qry_match_len, edge_match_length, MKI(nd.depth));
#endif
}
else
{
XPRINTF(" match too short (%d < %d)\n", qry_match_len, min_match_len);
}
}
/////////////////////////////////////
// Compute forward substring matches
/////////////////////////////////////
__global__ void
mummergpuKernel(void* match_coords,
#if COALESCED_QUERIES
int* coordAddrs,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
int* queries,
#else
char* queries,
#endif
#endif
#if !NODETEX
_PixelOfNode* nodes,
#endif
#if !CHILDTEX
_PixelOfChildren* childrenarr,
#endif
#if !REFTEX
char* ref,
#endif
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len
#if TREE_ACCESS_HISTOGRAM
,int* node_hist,
int* child_hist
#endif
)
{
int qryid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
XPRINTF("> qryid: %d\n", qryid);
if (qryid == 0)
{
PRINTNODES(0,200);
}
int qlen = queryLengths[qryid];
int qryAddr = queryAddrs[qryid];
//TextureAddress cur;
unsigned int cur = 0;
//cur.data = 0;
int mustmatch = 0;
int qry_match_len = 0;
_MatchCoord * result = MATCH_BASE(match_coords, qryid);
SHIFT_QUERIES(queries, qryAddr);
int last = qlen - min_match_len;
for (int qrystart = 0;
qrystart <= last;
qrystart++,
result += RESULT_SPAN)
{
//_PixelOfNode node;
unsigned int node_start;
unsigned int prev;
if ((cur == 0) || (qry_match_len < 1))
{
// start at root of tree
GOROOT(cur);
qry_match_len = 1;
mustmatch = 0;
}
char c = GETQCHAR(qrystart + qry_match_len);
XPRINTF("In node ("fNID"): starting with %c [%d] => \n",
NID(cur), c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
_PixelOfChildren children;
children.data = GETCHILDRENHIST(cur, false);
prev = cur;
uchar3 next;
switch (c)
{
case 'A': next = children.a; break;
case 'C': next = children.c; break;
case 'G': next = children.g; break;
case 'T': next = children.t; break;
default: next = make_uchar3(0,0,0); break;
};
arrayToAddress(next, cur);
XPRINTF(" In node: ("fNID")\n", NID(cur));
// No edge to follow out of the node
if (cur == 0) {
XPRINTF(" no edge\n");
SET_RESULT(prev, result, 0, qry_match_len, min_match_len, FORWARD);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
_PixelOfNode node;
node.data = GETNODEHIST(cur, true);
node_start = MKI(node.start);
unsigned int node_end = MKI(node.end);
XPRINTF(" Edge coordinates: %d - %d\n", node_start, node_end);
{
int edgelen = node_end - node_start + 1;
int edge_matchlen = node_start + mustmatch;
int past_node_end = node_end + 1;
int dist_to_edge_end = mustmatch - edgelen;
if (mustmatch) {
refpos = min(edge_matchlen, past_node_end);
qry_match_len += min(edgelen, mustmatch);
mustmatch = max(dist_to_edge_end, 0);
}
else {
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = node_start + 1;
}
}
c = GETQCHAR(qrystart + qry_match_len);
while (refpos <= node_end && c != '\0')
{
char r = GETRCHAR(refpos);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len, refpos - (node_start));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = GETQCHAR(qrystart + qry_match_len);
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
{
//_PixelOfNode node;
//node.data = getnodehist(cur, false);
SET_RESULT(cur, result, refpos - node_start, qry_match_len,
min_match_len, FORWARD);
mustmatch = refpos - node_start;
qry_match_len -= mustmatch + 1;
}
NEXT_SUBSTRING:
{
_PixelOfNode node;
node.data = GETNODEHIST(prev, false);
arrayToAddress(node.suffix, cur);
}
//XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:("fNID")\n",
// mustmatch, qry_match_len, NID(cur));
do {} while (0);
}
return;
}
///////////////////////////////////////
//// Compute reverse substring matches
///////////////////////////////////////
__global__ void
mummergpuRCKernel(MatchCoord* match_coords,
char* queries,
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len)
{
/*
int qryid = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
int qlen = queryLengths[qryid];
XPRINTF("> rc qryid: %d\n", qryid);
queries++; // skip the 'q' character
// start at root for first query character
TextureAddress cur;
int mustmatch = 0;
int qry_match_len = 0;
int qryAddr=queryAddrs[qryid];
MatchCoord * result = match_coords + qryAddr - __umul24(qryid, min_match_len + 1);
queries += qryAddr;
for (int qrystart = qlen;
qrystart >= min_match_len ;
qrystart--, result++)
{
#if VERBOSE
queries[qrystart] = '\0';
XPRINTF("qry: ", queries);
for (int j = qrystart-1; j >= 0; j--)
{ XPRINTF("%c", rc(queries[j])); }
XPRINTF("\n");
#endif
_PixelOfNode node;
TextureAddress prev;
if (((cur.data == 0)) || (qry_match_len < 1))
{
// start at root of tree
cur.x = 0; cur.y = 1;
qry_match_len = 1;
mustmatch = 0;
}
char c = rc(queries[qrystart-qry_match_len]);
XPRINTF("In node (%d,%d): starting with %c [%d] => \n", cur.x, cur.y, c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
_PixelOfChildren children;
children.data = tex2D(childrentex,cur.x, cur.y);
prev = cur;
switch(c)
{
case 'A': cur=children.children[0]; break;
case 'C': cur=children.children[1]; break;
case 'G': cur=children.children[2]; break;
case 'T': cur=children.children[3]; break;
default: cur.data = 0; break;
};
XPRINTF(" In node: (%d,%d)\n", cur.x, cur.y);
// No edge to follow out of the node
if (cur.data == 0)
{
XPRINTF(" no edge\n");
SET_RESULT(prev, (_MatchCoord*)result, 0, qry_match_len, min_match_len,
REVERSE);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
{
node.data = tex2D(nodetex, cur.data & 0xFFFF, cur.data >> 16);
}
XPRINTF(" Edge coordinates: %d - %d\n", MKI(node.start), MKI(node.end));
if (mustmatch)
{
int edgelen = MKI(node.end) - MKI(node.start)+1;
if (mustmatch >= edgelen)
{
XPRINTF(" mustmatch(%d) >= edgelen(%d), skipping edge\n", mustmatch, edgelen);
refpos = MKI(node.end)+1;
qry_match_len += edgelen;
mustmatch -= edgelen;
}
else
{
XPRINTF(" mustmatch(%d) < edgelen(%d), skipping to:%d\n",
mustmatch, edgelen, MKI(node.start)+mustmatch);
qry_match_len += mustmatch;
refpos = MKI(node.start) + mustmatch;
mustmatch = 0;
}
}
else
{
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = MKI(node.start)+1;
}
c = rc(queries[qrystart-qry_match_len]);
while (refpos <= MKI(node.end) && c != '\0')
{
char r = getRef(refpos
#if !REFTEX
//FIXME: this needs to be a pointer to ref->d_ref_array
,NULL
#endif
);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len,refpos - (MKI(node.start)));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = rc(queries[qrystart-qry_match_len]);
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
SET_RESULT(cur, (_MatchCoord*)result, refpos - MKI(node.start), qry_match_len,
min_match_len, REVERSE);
mustmatch = refpos - MKI(node.start);
qry_match_len -= mustmatch + 1;
NEXT_SUBSTRING:
node.data = tex2D(nodetex, prev.x, prev.y);
cur = node.suffix;
XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:(%d,%d)\n",
mustmatch, qry_match_len, cur.x, cur.y);
do {} while(0);
}
*/
return;
}
__global__ void
printKernel(MatchInfo * matches,
int totalMatches,
Alignment * alignments,
#if !QRYTEX
#if COALESCED_QUERIES
int * queries,
#else
char * queries,
#endif
#endif
#if !NODETEX
_PixelOfNode* nodes,
#endif
#if !CHILDTEX
_PixelOfChildren* childrenarr,
#endif
const int * queryAddrs,
const int * queryLengths,
const int page_begin,
const int page_end,
const int page_shadow_left,
const int page_shadow_right,
const int min_match_length
#if TREE_ACCESS_HISTOGRAM
,int* node_hist,
int* child_hist
#endif
)
{
int matchid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (matchid >= totalMatches) { return; }
XPRINTF(">matchid: %d qry: %d\n", matchid, matches[matchid].queryid);
int qryAddr = queryAddrs[matches[matchid].queryid];
SHIFT_QUERIES(queries, qryAddr);
#if !QRYTEX
XPRINTF("query: %s\n", queries);
#endif
char queryflankingbase = GETQCHAR(matches[matchid].qrystartpos);
// Find the top node to start printing from
unsigned int matchaddr = matches[matchid].matchnode.data;
unsigned int cur = matchaddr;
unsigned int printParent = cur;
_PixelOfNode node;
node.data = GETNODE(cur, true);
XPRINTF("starting node: %d "fNID" depth: %d\n", matches[matchid].matchnode, NID(cur), MKI(node.depth));
while (MKI(node.depth) > min_match_length)
{
printParent = cur;
arrayToAddress(node.parent, cur);
node.data = GETNODE(cur, true);
XPRINTF("par: "fNID" depth: %d\n", NID(cur), MKI(node.depth));
}
// traverse the tree starting at printParent
unsigned int badParent = cur;
cur = printParent;
XPRINTF(" printParent: "fNID"\n", NID(printParent));
char curchild = 'A';
bool forceToParent = false;
node.data = GETNODE(printParent, true);
int matchlen = MKI(node.depth) - 1;
int depthToGoldenPath = 0;
int matchnum = matches[matchid].resultsoffset;
// If the printparent is the matchnode, then we are already off the golden path
if (printParent == matchaddr)
{
if (matches[matchid].edgematch > 0)
{
node.data = GETNODE(badParent, true);
matchlen = MKI(node.depth)-1+matches[matchid].edgematch;
}
depthToGoldenPath = 1;
}
// keep going until I hit the printParent's parent
while (cur != badParent)
{
_PixelOfChildren children;
children.data = GETCHILDREN(cur, true);
char isLeaf = children.leafchar;
XPRINTF(" cur: "fNID" curchild: %c isLeaf:%d forceToParent:%d\n",
NID(cur), curchild, isLeaf, forceToParent);
if (isLeaf || forceToParent)
{
// See if I am left maximal and print
if (isLeaf && isLeaf != queryflankingbase)
{
int leafid = MKI(children.leafid);
int left_in_ref = (leafid - 1) + page_begin;
int right_in_ref = left_in_ref + matchlen;
if ((left_in_ref != page_begin || page_shadow_left == -1) &&
(right_in_ref != page_end || page_shadow_right == -1))
{
if (!(left_in_ref > page_begin && right_in_ref < page_shadow_left))
{
//sprintf(buf, "%8d%10d%10d\n", left_in_ref, qrystartpos+1, matchlen);
XPRINTF("%8d%10d%10d\n",
left_in_ref,
matches[matchid].qrystartpos+1,
matchlen);
alignments[matchnum].left_in_ref = left_in_ref;
alignments[matchnum].matchlen = matchlen;
matchnum++;
}
}
}
forceToParent = false;
// now return to my parent and advance curchild
node.data = GETNODE(cur, true);
unsigned int myParent;
arrayToAddress(node.parent, myParent);
_PixelOfChildren pchildren;
pchildren.data = GETCHILDREN(myParent, true);
unsigned int pa, pc, pg, pt;
arrayToAddress(pchildren.a, pa);
arrayToAddress(pchildren.c, pc);
arrayToAddress(pchildren.g, pg);
arrayToAddress(pchildren.t, pt);
if (pa == cur) { curchild = 'C'; }
else if (pc == cur) { curchild = 'G'; }
else if (pg == cur) { curchild = 'T'; }
else if (pt == cur) { curchild = '$'; }
else // I must be the $ child, go up a level
{
forceToParent = true;
}
cur = myParent;
if (depthToGoldenPath) { depthToGoldenPath--; }
if (depthToGoldenPath == 0)
{
node.data = GETNODE(cur, true);
matchlen = MKI(node.depth)-1;
}
}
else
{
// try to walk down the tree
_PixelOfChildren children;
children.data = GETCHILDREN(cur, true);
char goldenChild = 0;
if (depthToGoldenPath == 0)
{
// we are currently on the golden path
// one of the children is also on the golden path
goldenChild = GETQCHAR(matches[matchid].qrystartpos+matchlen+1);
}
do
{
if (curchild == 'A')
{
if (children.a.x || children.a.y || children.a.z)
{
XPRINTF(" -> A\n");
arrayToAddress(children.a, cur);
break;
}
curchild = 'C';
}
if (curchild == 'C')
{
if (children.c.x || children.c.y || children.c.z)
{
XPRINTF(" -> C\n");
arrayToAddress(children.c, cur);
break;
}
curchild = 'G';
}
if (curchild == 'G')
{
if (children.g.x || children.g.y || children.g.z)
{
XPRINTF(" -> G\n");
arrayToAddress(children.g, cur);
break;
}
curchild = 'T';
}
if (curchild == 'T')
{
if (children.t.x || children.t.y || children.t.z)
{
XPRINTF(" -> T\n");
arrayToAddress(children.t, cur);
break;
}
curchild = '$';
}
if (curchild == '$')
{
if (children.d.x || children.d.y || children.d.z)
{
XPRINTF(" -> $\n");
arrayToAddress(children.d, cur);
break;
}
}
// checked all of the children, go back to parent
forceToParent = true;
}
while (0);
if (!forceToParent)
{
if (depthToGoldenPath == 0)
{
if (curchild == goldenChild)
{
node.data = GETNODE(cur, true);
matchlen = MKI(node.depth)-1;
if (cur == matchaddr)
{
// we overextended the golden path
depthToGoldenPath = 1;
if (matches[matchid].edgematch > 0)
{
unsigned int par;
arrayToAddress(node.parent, par);
node.data = GETNODE(par, true);
matchlen = MKI(node.depth) - 1 + matches[matchid].edgematch;
}
}
}
else
{
depthToGoldenPath = 1;
}
}
else
{
depthToGoldenPath++;
}
curchild = 'A';
}
}
}
}
#endif // #ifndef _MUMMERGPU_HH_
|
5d816ef487dfaaa4050183008006d3293cdf1497.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
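// NOTE: REPLACE_ITERATIONS looks like a placeholder that the benchmark
// generator substitutes with a concrete iteration count before compiling;
// the file does not build until that substitution happens.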
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float Value1=0;
float Value2=0;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 +=tex1Dfetch(texmem2,tid*j)+I1[(i+j)%THREADS_PER_BLOCK];
Value2 += I2[(i+j)%THREADS_PER_BLOCK]* I1[(i+j)%THREADS_PER_BLOCK]+Value1;
sum+=tex1Dfetch(texmem2,tid*j)+B[tid]+Value2;
Value1 += sqrt(abs(sum))+A[tid];
Value2 += tex1Dfetch(texmem3,tid*j)* I2[(i+j)%THREADS_PER_BLOCK];
sum/=tex1Dfetch(texmem4,tid*j)+A[tid];
}
A[tid*2] = sum+Value1;
B[tid] = A[tid*2]+A[tid];
}
}
}
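// PowerKernal1 mixes texture fetches (texmem1-4), shared-memory reads (I1/I2)
// and floating-point sqrt/div in a tight loop, writing the results back to A
// and B (presumably so the compiler cannot eliminate the work); it serves as
// a synthetic load for the power measurement driven from main().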
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array1[i] = rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array2[i] = rand() / RAND_MAX;
}
hipMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
hipMalloc((void**) &device_texture1, size1);
hipMalloc((void**) &device_texture2, size1);
hipMalloc((void**) &device_texture3, size1);
hipMalloc((void**) &device_texture4, size1);
hipMemcpy(device_texture1, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, size1, hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, size1);
hipBindTexture(0, texmem2, device_texture2, size1);
hipBindTexture(0, texmem3, device_texture3, size1);
hipBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Fills an array with random entries.
void RandomInit_int(float* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
}
| 5d816ef487dfaaa4050183008006d3293cdf1497.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
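// PowerKernal1 (below) is a stress/power kernel: each thread stages A and B into shared memory,
// then runs ITERATIONS rounds that mix texture fetches (texmem1..texmem4), shared-memory reads,
// sqrt/abs and divides, and finally writes back into A and B so the work cannot be optimized away.
// Note the thread index is computed as blockIdx.x*blockIdx.x + threadIdx.x (kept from the original
// benchmark), so coverage of A/B is sparse and overlapping rather than one element per thread.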
__global__ void PowerKernal1(float *A, float *B, int N)
{
int tid = blockIdx.x*blockIdx.x + threadIdx.x;
float Value1=0;
float Value2=0;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 +=tex1Dfetch(texmem2,tid*j)+I1[(i+j)%THREADS_PER_BLOCK];
Value2 += I2[(i+j)%THREADS_PER_BLOCK]* I1[(i+j)%THREADS_PER_BLOCK]+Value1;
sum+=tex1Dfetch(texmem2,tid*j)+B[tid]+Value2;
Value1 += sqrt(abs(sum))+A[tid];
Value2 += tex1Dfetch(texmem3,tid*j)* I2[(i+j)%THREADS_PER_BLOCK];
sum/=tex1Dfetch(texmem4,tid*j)+A[tid];
}
A[tid*2] = sum+Value1;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
srand(time(0));
for(int i=0; i<THREADS_PER_BLOCK;i++){
	array1[i] = rand() / (float)RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
	array2[i] = rand() / (float)RAND_MAX;
}
cudaMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
cudaMalloc((void**) &device_texture1, size1);
cudaMalloc((void**) &device_texture2, size1);
cudaMalloc((void**) &device_texture3, size1);
cudaMalloc((void**) &device_texture4, size1);
cudaMemcpy(device_texture1, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, size1, cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, size1);
cudaBindTexture(0, texmem2, device_texture2, size1);
cudaBindTexture(0, texmem3, device_texture3, size1);
cudaBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( cudaThreadSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
cudaFree(d_A1);
if (d_A2)
cudaFree(d_A2);
if (d_A3)
cudaFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Fills an array with random float entries in [0, 1].
void RandomInit_int(float* data, int n)
{
	srand((unsigned)time(0));
	for (int i = 0; i < n; ++i){
		data[i] = rand() / (float)RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
		data[i] = rand() / (float)RAND_MAX;
}
}
|
75b0b47fb17fd4cc9dd8a71a32d8c81723cca46e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file generateVelB.cu
* \author Anush Krishnan ([email protected])
* \brief Implementation of the kernels to generate body-velocities.
*/
#include "generateVelB.h"
/**
* \namespace kernels
* \brief Contains all custom-written CUDA kernels.
*/
namespace kernels
{
/**
* \brief Stores an element of the u- and v- body-velocities into one single array.
*
* \param velB vector that contains both u- and v- velocities
* \param uB u-velocity of body points (all bodies included)
* \param vB v-velocity of body points (all bodies included)
* \param totalPoints number of body points (all bodies included)
*/
__global__
void fill_velB(real *velB, real *uB, real *vB, int totalPoints)
{
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k<totalPoints)
{
velB[k] = uB[k];
velB[k + totalPoints] = vB[k];
}
}
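// Resulting layout: velB[0 .. totalPoints) holds uB and velB[totalPoints .. 2*totalPoints) holds vB,
// i.e. the two velocity components are simply concatenated into a single vector.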
} // end of namespace kernels
| 75b0b47fb17fd4cc9dd8a71a32d8c81723cca46e.cu | /***************************************************************************//**
* \file generateVelB.cu
* \author Anush Krishnan ([email protected])
* \brief Implementation of the kernels to generate body-velocities.
*/
#include "generateVelB.h"
/**
* \namespace kernels
* \brief Contains all custom-written CUDA kernels.
*/
namespace kernels
{
/**
* \brief Stores an element of the u- and v- body-velocities into one single array.
*
* \param velB vector that contains both u- and v- velocities
* \param uB u-velocity of body points (all bodies included)
* \param vB v-velocity of body points (all bodies included)
* \param totalPoints number of body points (all bodies included)
*/
__global__
void fill_velB(real *velB, real *uB, real *vB, int totalPoints)
{
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k<totalPoints)
{
velB[k] = uB[k];
velB[k + totalPoints] = vB[k];
}
}
} // end of namespace kernels
|
ec36612ec67f5a99603d548105a1527ee0bc0151.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "multiplyBy2_self.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
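// Benchmark harness: for each of the first argv[1] matrix sizes and each of the 20 block shapes
// above, main() allocates the kernel argument, does one untimed launch plus 10 warm-up launches,
// then times 1000 launches of multiplyBy2_self and prints [microseconds,(blockX,blockY),(X,Y)].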
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
long *inout = NULL;
hipMalloc(&inout, XSIZE*YSIZE*sizeof(long));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(multiplyBy2_self, dim3(gridBlock), dim3(threadBlock), 0, 0, size, inout);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(multiplyBy2_self, dim3(gridBlock), dim3(threadBlock), 0, 0, size, inout);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(multiplyBy2_self, dim3(gridBlock), dim3(threadBlock), 0, 0, size, inout);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ec36612ec67f5a99603d548105a1527ee0bc0151.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "multiplyBy2_self.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
long *inout = NULL;
cudaMalloc(&inout, XSIZE*YSIZE*sizeof(long));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
multiplyBy2_self<<<gridBlock,threadBlock>>>(size,inout);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
multiplyBy2_self<<<gridBlock,threadBlock>>>(size,inout);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
multiplyBy2_self<<<gridBlock,threadBlock>>>(size,inout);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a7ad54fa976ab63a4aba3596169bba6fecb1d32e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "pointGenKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *points = NULL;
hipMalloc(&points, XSIZE*YSIZE*sizeof(float));
float *dirs = NULL;
hipMalloc(&dirs, XSIZE*YSIZE*sizeof(float));
int nBBS0 = 1;
int nelems = 1;
float minimum = 1;
float step = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(pointGenKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, points, dirs, nBBS0, nelems, minimum, step);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(pointGenKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, points, dirs, nBBS0, nelems, minimum, step);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(pointGenKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, points, dirs, nBBS0, nelems, minimum, step);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a7ad54fa976ab63a4aba3596169bba6fecb1d32e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "pointGenKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *points = NULL;
cudaMalloc(&points, XSIZE*YSIZE*sizeof(float));
float *dirs = NULL;
cudaMalloc(&dirs, XSIZE*YSIZE*sizeof(float));
int nBBS0 = 1;
int nelems = 1;
float minimum = 1;
float step = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
pointGenKernel<<<gridBlock,threadBlock>>>(points,dirs,nBBS0,nelems,minimum,step);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
pointGenKernel<<<gridBlock,threadBlock>>>(points,dirs,nBBS0,nelems,minimum,step);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
pointGenKernel<<<gridBlock,threadBlock>>>(points,dirs,nBBS0,nelems,minimum,step);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ee27e8e4360230535951422d4d59067de905d403.hip | // !!! This is a file automatically generated by hipify!!!
#include "bloomfilter.h"
#include <stdlib.h>
#include <iostream>
#include <semaphore.h>
#include <vector>
#include <bitset>
#include <cstring>
#include <ctime>
#include <omp.h>
#include <inttypes.h>
#include <iomanip>
#include <iomanip>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include <chrono>
#include <iostream>
#include <fstream>
#include <cstdlib>
using namespace std;
#define BIG_CONSTANT(x) (x)
#define ROTL64(x,y) rotl64(x,y)
#define FORCE_INLINE inline __attribute__((always_inline))
#define BIT_ARRAY_SIZE 100000
#define SEED_VALUE_1 27
#define SEED_VALUE_2 58
#define SEED_VALUE_3 99
const int MAX = 26;
sem_t semaphore;
__device__ inline uint64_t rotl64(uint64_t x, int8_t r){
return (x << r) | (x >> (64 - r));
}
__device__ FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
__device__ FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
{
return p[i];
}
__device__ void MurmurHash3_x64_128(const void* key, const int len, const uint32_t seed, uint64_t* hash, uint64_t* kvalues){
const uint8_t* data = (const uint8_t*)key;
const int nblocks = len/16;
uint64_t h1 = seed;
uint64_t h2 = seed;
uint64_t c1;
uint64_t c2;
c1 = BIG_CONSTANT(0x87c37b91114253d5);
c2 = BIG_CONSTANT(0x4cf5ad432745937f);
//const uint64_t *blocks = (const uint64_t *)(data);
uint64_t k1, k2;
for(int i = 0; i < nblocks; i++){
k1 = kvalues[i*2 + 0];
k2 = kvalues[i*2 + 1];
h1 ^= k1;
h1 = ROTL64(h1,27);
h1 += h2;
h1 = h1*5+0x52dce729;
h2 ^= k2;
h2 = ROTL64(h2,31);
h2 += h1;
h2 = h2*5+0x38495ab5;
}
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
// uint64_t
k1 = 0;
//uint64_t
k2 = 0;
switch(len & 15){
case 15: k2 ^= ((uint64_t)tail[14]) << 48;
case 14: k2 ^= ((uint64_t)tail[13]) << 40;
case 13: k2 ^= ((uint64_t)tail[12]) << 32;
case 12: k2 ^= ((uint64_t)tail[11]) << 24;
case 11: k2 ^= ((uint64_t)tail[10]) << 16;
case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
case 9: k2 ^= ((uint64_t)tail[ 8]) << 0;
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)tail[ 7]) << 56;
case 7: k1 ^= ((uint64_t)tail[ 6]) << 48;
case 6: k1 ^= ((uint64_t)tail[ 5]) << 40;
case 5: k1 ^= ((uint64_t)tail[ 4]) << 32;
case 4: k1 ^= ((uint64_t)tail[ 3]) << 24;
case 3: k1 ^= ((uint64_t)tail[ 2]) << 16;
case 2: k1 ^= ((uint64_t)tail[ 1]) << 8;
case 1: k1 ^= ((uint64_t)tail[ 0]) << 0;
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
h2 += h1;
((uint64_t*)hash)[0] = h1;
((uint64_t*)hash)[1] = h2;
}
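// Note: unlike the reference MurmurHash3, the per-16-byte-block k1/k2 products are not computed
// here; the caller (insertInHashTable) precomputes them once into `kvalues`, and the same mixed
// blocks are then reused for all three seeded hash evaluations.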
string genRandomString(int n)
{
char alphabet[MAX] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z' };
string res = "";
for (int i = 0; i < n; i++)
res = res + alphabet[rand() % MAX];
return res;
}
__device__ void insertInHashTable(char* key, int length, int* d_bitArray, int idx, uint64_t* d_kvalues){
// Calculate 3 hashes and insert
uint64_t hash1[2];
uint64_t hash2[2];
uint64_t hash3[2];
int bit1, bit2, bit3;
const uint8_t* data = (const uint8_t*)key;
const int nblocks = length/16;
uint64_t c1;
uint64_t c2;
c1 = BIG_CONSTANT(0x87c37b91114253d5);
c2 = BIG_CONSTANT(0x4cf5ad432745937f);
const uint64_t *blocks = (const uint64_t *)(data);
uint64_t k1, k2;
//uint64_t kvalues[nblocks*2];
for(int i = 0; i < nblocks; i++){
k1 = getblock64(blocks,i*2+0);
k1 *= c1;
k1 = ROTL64(k1,31);
k1 *= c2;
k2 = getblock64(blocks,i*2+1);
k2 *= c2;
k2 = ROTL64(k2,33);
k2 *= c1;
d_kvalues[i*2 + 0] = k1;
d_kvalues[i*2 + 1] = k2;
}
MurmurHash3_x64_128(key, length, SEED_VALUE_1, hash1, d_kvalues);
bit1 = (hash1[0] % BIT_ARRAY_SIZE + hash1[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
MurmurHash3_x64_128(key, length, SEED_VALUE_2, hash2, d_kvalues);
bit2 = (hash2[0] % BIT_ARRAY_SIZE + hash2[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
MurmurHash3_x64_128(key, length, SEED_VALUE_3, hash3, d_kvalues);
bit3 = (hash3[0] % BIT_ARRAY_SIZE + hash3[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
// cout << "Bits set are: " << bit1 << "," << bit2 << " and " << bit3 << "\n";
// d_HashTable[bit1] = 1;
// d_HashTable[bit2] = 1;
// d_HashTable[bit3] = 1;
d_bitArray[idx*3+0] = bit1;
d_bitArray[idx*3+1] = bit2;
d_bitArray[idx*3+2] = bit3;
// printf("bit array at %d: %d\n", idx, bit1);
// printf("bit array at %d: %d\n", idx+1, bit2);
// printf("bit array at %d: %d\n", idx+2, bit3);
//cout << "Set bits: " << bit1 << ", " << bit2 << ", " << bit3 << "\n";
}
/*
void checkIfPresent(bitset<BIT_ARRAY_SIZE> HashTable, char* key, int length){
// Calculate 3 hashes and check bit
uint64_t hash1[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_1, hash1);
int bit1 = (hash1[0] % BIT_ARRAY_SIZE + hash1[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
uint64_t hash2[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_2, hash2);
int bit2 = (hash2[0] % BIT_ARRAY_SIZE + hash2[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
uint64_t hash3[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_3, hash3);
int bit3 = (hash3[0] % BIT_ARRAY_SIZE + hash3[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
if(HashTable.test(bit1) == 1 && HashTable.test(bit2) == 1 && HashTable.test(bit3) == 1){
cout << key << " might be present" << "\n";
}
else{
cout << key << " is definitely not present" << "\n";
}
}*/
__device__ char* getword(char* d_wordsToInsert, int idx, int lenOfWord){
char* temp = new char[lenOfWord + 1];
for(int i=0; i<lenOfWord; i++){
temp[i] = d_wordsToInsert[idx*lenOfWord+i];
}
temp[lenOfWord] = '\0';
return temp;
}
__global__ void parallelInsertion(char* d_wordsToInsert, int lenOfWord, int numIterations, int* d_bitArray, uint64_t* d_kvalues){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int gridStride = blockDim.x * gridDim.x;
for(int i=idx; i<numIterations; i += gridStride){
		// Use the grid-stride index i (not idx) so each iteration processes a distinct word.
		char* cstr = getword(d_wordsToInsert, i, lenOfWord);
		insertInHashTable(cstr, lenOfWord, d_bitArray, i, d_kvalues);
		delete[] cstr;	// getword() allocates on the device heap; free it to avoid leaking
}
}
int main(int argc, char**argv){
int lenOfWord = atoi(argv[1]);
string str;
int numIterations = atoi(argv[2]);
char wordsToInsert[lenOfWord * numIterations];
for(int i = 0; i < numIterations; i++){
str = genRandomString(lenOfWord);
char* cstr = new char[lenOfWord + 1];
strcpy(cstr, str.c_str());
for(int j = 0; j < lenOfWord; j++){
wordsToInsert[i*lenOfWord+j] = cstr[j];
}
}
uint64_t kvalues[lenOfWord/16 * 2];
uint64_t* d_kvalues;
hipMalloc((void**)&d_kvalues, lenOfWord/16*2*sizeof(uint64_t));
hipMemcpy(d_kvalues, kvalues, lenOfWord/16*2*sizeof(uint64_t), hipMemcpyHostToDevice);
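	// d_kvalues is uninitialized scratch; the copy only sizes/places the buffer and the kernel
	// fills it on the device. It is a single buffer shared by all threads, so concurrent threads
	// overwrite each other's per-block values.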
int bitArray[3*numIterations];
int* d_bitArray;
hipMalloc((void**)&d_bitArray, 3*numIterations*sizeof(int));
hipMemcpy(d_bitArray, bitArray, 3*numIterations*sizeof(int), hipMemcpyHostToDevice);
char* d_wordsToInsert;
hipMalloc((void**)&d_wordsToInsert, lenOfWord*numIterations*sizeof(char));
hipMemcpy(d_wordsToInsert, wordsToInsert, lenOfWord*numIterations*sizeof(char), hipMemcpyHostToDevice);
// int* HashTable = (int*)calloc(BIT_ARRAY_SIZE, sizeof(int));
// int* d_HashTable;
// hipMalloc((void**)&d_HashTable, BIT_ARRAY_SIZE*sizeof(int));
// hipMemset(d_HashTable, 0, BIT_ARRAY_SIZE*sizeof(int));
// hipMemcpy(d_HashTable, HashTable, BIT_ARRAY_SIZE*sizeof(int), hipMemcpyHostToDevice);
//time and call function here
auto t_start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( parallelInsertion), dim3(1), dim3(100), 0, 0, d_wordsToInsert, lenOfWord, numIterations, d_bitArray, d_kvalues);
hipDeviceSynchronize();
auto t_end = std::chrono::high_resolution_clock::now();
hipMemcpy(bitArray, d_bitArray, 3*numIterations*sizeof(int), hipMemcpyDeviceToHost);
// hipFree(d_HashTable);
hipFree(d_wordsToInsert);
hipFree(d_bitArray);
hipFree(d_kvalues);
//free(HashTable);
double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count();
// cout << "Time taken for inserting " << numIterations << " records in CUDA parallelized version: " << elapsed_time_ms << setprecision(9);
// cout << " ms" << endl;
std::ofstream outfile;
outfile.open("./Times/cuda_times.txt", std::ios_base::app);
outfile << lenOfWord << ":" << numIterations << ":" << elapsed_time_ms << endl;
return 0;
}
| ee27e8e4360230535951422d4d59067de905d403.cu | #include "bloomfilter.h"
#include <stdlib.h>
#include <iostream>
#include <semaphore.h>
#include <vector>
#include <bitset>
#include <cstring>
#include <ctime>
#include <omp.h>
#include <inttypes.h>
#include <iomanip>
#include <iomanip>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include <chrono>
#include <iostream>
#include <fstream>
#include <cstdlib>
using namespace std;
#define BIG_CONSTANT(x) (x)
#define ROTL64(x,y) rotl64(x,y)
#define FORCE_INLINE inline __attribute__((always_inline))
#define BIT_ARRAY_SIZE 100000
#define SEED_VALUE_1 27
#define SEED_VALUE_2 58
#define SEED_VALUE_3 99
const int MAX = 26;
sem_t semaphore;
__device__ inline uint64_t rotl64(uint64_t x, int8_t r){
return (x << r) | (x >> (64 - r));
}
__device__ FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
__device__ FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
{
return p[i];
}
__device__ void MurmurHash3_x64_128(const void* key, const int len, const uint32_t seed, uint64_t* hash, uint64_t* kvalues){
const uint8_t* data = (const uint8_t*)key;
const int nblocks = len/16;
uint64_t h1 = seed;
uint64_t h2 = seed;
uint64_t c1;
uint64_t c2;
c1 = BIG_CONSTANT(0x87c37b91114253d5);
c2 = BIG_CONSTANT(0x4cf5ad432745937f);
//const uint64_t *blocks = (const uint64_t *)(data);
uint64_t k1, k2;
for(int i = 0; i < nblocks; i++){
k1 = kvalues[i*2 + 0];
k2 = kvalues[i*2 + 1];
h1 ^= k1;
h1 = ROTL64(h1,27);
h1 += h2;
h1 = h1*5+0x52dce729;
h2 ^= k2;
h2 = ROTL64(h2,31);
h2 += h1;
h2 = h2*5+0x38495ab5;
}
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
// uint64_t
k1 = 0;
//uint64_t
k2 = 0;
switch(len & 15){
case 15: k2 ^= ((uint64_t)tail[14]) << 48;
case 14: k2 ^= ((uint64_t)tail[13]) << 40;
case 13: k2 ^= ((uint64_t)tail[12]) << 32;
case 12: k2 ^= ((uint64_t)tail[11]) << 24;
case 11: k2 ^= ((uint64_t)tail[10]) << 16;
case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
case 9: k2 ^= ((uint64_t)tail[ 8]) << 0;
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)tail[ 7]) << 56;
case 7: k1 ^= ((uint64_t)tail[ 6]) << 48;
case 6: k1 ^= ((uint64_t)tail[ 5]) << 40;
case 5: k1 ^= ((uint64_t)tail[ 4]) << 32;
case 4: k1 ^= ((uint64_t)tail[ 3]) << 24;
case 3: k1 ^= ((uint64_t)tail[ 2]) << 16;
case 2: k1 ^= ((uint64_t)tail[ 1]) << 8;
case 1: k1 ^= ((uint64_t)tail[ 0]) << 0;
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
h2 += h1;
((uint64_t*)hash)[0] = h1;
((uint64_t*)hash)[1] = h2;
}
string genRandomString(int n)
{
char alphabet[MAX] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z' };
string res = "";
for (int i = 0; i < n; i++)
res = res + alphabet[rand() % MAX];
return res;
}
__device__ void insertInHashTable(char* key, int length, int* d_bitArray, int idx, uint64_t* d_kvalues){
// Calculate 3 hashes and insert
uint64_t hash1[2];
uint64_t hash2[2];
uint64_t hash3[2];
int bit1, bit2, bit3;
const uint8_t* data = (const uint8_t*)key;
const int nblocks = length/16;
uint64_t c1;
uint64_t c2;
c1 = BIG_CONSTANT(0x87c37b91114253d5);
c2 = BIG_CONSTANT(0x4cf5ad432745937f);
const uint64_t *blocks = (const uint64_t *)(data);
uint64_t k1, k2;
//uint64_t kvalues[nblocks*2];
for(int i = 0; i < nblocks; i++){
k1 = getblock64(blocks,i*2+0);
k1 *= c1;
k1 = ROTL64(k1,31);
k1 *= c2;
k2 = getblock64(blocks,i*2+1);
k2 *= c2;
k2 = ROTL64(k2,33);
k2 *= c1;
d_kvalues[i*2 + 0] = k1;
d_kvalues[i*2 + 1] = k2;
}
MurmurHash3_x64_128(key, length, SEED_VALUE_1, hash1, d_kvalues);
bit1 = (hash1[0] % BIT_ARRAY_SIZE + hash1[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
MurmurHash3_x64_128(key, length, SEED_VALUE_2, hash2, d_kvalues);
bit2 = (hash2[0] % BIT_ARRAY_SIZE + hash2[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
MurmurHash3_x64_128(key, length, SEED_VALUE_3, hash3, d_kvalues);
bit3 = (hash3[0] % BIT_ARRAY_SIZE + hash3[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
// cout << "Bits set are: " << bit1 << "," << bit2 << " and " << bit3 << "\n";
// d_HashTable[bit1] = 1;
// d_HashTable[bit2] = 1;
// d_HashTable[bit3] = 1;
d_bitArray[idx*3+0] = bit1;
d_bitArray[idx*3+1] = bit2;
d_bitArray[idx*3+2] = bit3;
// printf("bit array at %d: %d\n", idx, bit1);
// printf("bit array at %d: %d\n", idx+1, bit2);
// printf("bit array at %d: %d\n", idx+2, bit3);
//cout << "Set bits: " << bit1 << ", " << bit2 << ", " << bit3 << "\n";
}
/*
void checkIfPresent(bitset<BIT_ARRAY_SIZE> HashTable, char* key, int length){
// Calculate 3 hashes and check bit
uint64_t hash1[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_1, hash1);
int bit1 = (hash1[0] % BIT_ARRAY_SIZE + hash1[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
uint64_t hash2[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_2, hash2);
int bit2 = (hash2[0] % BIT_ARRAY_SIZE + hash2[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
uint64_t hash3[2];
MurmurHash3_x64_128(key, length, SEED_VALUE_3, hash3);
int bit3 = (hash3[0] % BIT_ARRAY_SIZE + hash3[1] % BIT_ARRAY_SIZE) % BIT_ARRAY_SIZE;
if(HashTable.test(bit1) == 1 && HashTable.test(bit2) == 1 && HashTable.test(bit3) == 1){
cout << key << " might be present" << "\n";
}
else{
cout << key << " is definitely not present" << "\n";
}
}*/
__device__ char* getword(char* d_wordsToInsert, int idx, int lenOfWord){
char* temp = new char[lenOfWord + 1];
for(int i=0; i<lenOfWord; i++){
temp[i] = d_wordsToInsert[idx*lenOfWord+i];
}
temp[lenOfWord] = '\0';
return temp;
}
__global__ void parallelInsertion(char* d_wordsToInsert, int lenOfWord, int numIterations, int* d_bitArray, uint64_t* d_kvalues){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int gridStride = blockDim.x * gridDim.x;
for(int i=idx; i<numIterations; i += gridStride){
		// Use the grid-stride index i (not idx) so each iteration processes a distinct word.
		char* cstr = getword(d_wordsToInsert, i, lenOfWord);
		insertInHashTable(cstr, lenOfWord, d_bitArray, i, d_kvalues);
		delete[] cstr;	// getword() allocates on the device heap; free it to avoid leaking
}
}
int main(int argc, char**argv){
int lenOfWord = atoi(argv[1]);
string str;
int numIterations = atoi(argv[2]);
char wordsToInsert[lenOfWord * numIterations];
for(int i = 0; i < numIterations; i++){
str = genRandomString(lenOfWord);
char* cstr = new char[lenOfWord + 1];
strcpy(cstr, str.c_str());
for(int j = 0; j < lenOfWord; j++){
wordsToInsert[i*lenOfWord+j] = cstr[j];
}
}
uint64_t kvalues[lenOfWord/16 * 2];
uint64_t* d_kvalues;
cudaMalloc((void**)&d_kvalues, lenOfWord/16*2*sizeof(uint64_t));
cudaMemcpy(d_kvalues, kvalues, lenOfWord/16*2*sizeof(uint64_t), cudaMemcpyHostToDevice);
int bitArray[3*numIterations];
int* d_bitArray;
cudaMalloc((void**)&d_bitArray, 3*numIterations*sizeof(int));
cudaMemcpy(d_bitArray, bitArray, 3*numIterations*sizeof(int), cudaMemcpyHostToDevice);
char* d_wordsToInsert;
cudaMalloc((void**)&d_wordsToInsert, lenOfWord*numIterations*sizeof(char));
cudaMemcpy(d_wordsToInsert, wordsToInsert, lenOfWord*numIterations*sizeof(char), cudaMemcpyHostToDevice);
// int* HashTable = (int*)calloc(BIT_ARRAY_SIZE, sizeof(int));
// int* d_HashTable;
// cudaMalloc((void**)&d_HashTable, BIT_ARRAY_SIZE*sizeof(int));
// cudaMemset(d_HashTable, 0, BIT_ARRAY_SIZE*sizeof(int));
// cudaMemcpy(d_HashTable, HashTable, BIT_ARRAY_SIZE*sizeof(int), cudaMemcpyHostToDevice);
//time and call function here
auto t_start = std::chrono::high_resolution_clock::now();
parallelInsertion<<<1, 100>>>(d_wordsToInsert, lenOfWord, numIterations, d_bitArray, d_kvalues);
cudaDeviceSynchronize();
auto t_end = std::chrono::high_resolution_clock::now();
cudaMemcpy(bitArray, d_bitArray, 3*numIterations*sizeof(int), cudaMemcpyDeviceToHost);
// cudaFree(d_HashTable);
cudaFree(d_wordsToInsert);
cudaFree(d_bitArray);
cudaFree(d_kvalues);
//free(HashTable);
double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end-t_start).count();
// cout << "Time taken for inserting " << numIterations << " records in CUDA parallelized version: " << elapsed_time_ms << setprecision(9);
// cout << " ms" << endl;
std::ofstream outfile;
outfile.open("./Times/cuda_times.txt", std::ios_base::app);
outfile << lenOfWord << ":" << numIterations << ":" << elapsed_time_ms << endl;
return 0;
}
|
b7721266313881a4d873c0213586fdf13a045964.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/loss_layers.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom,
const Dtype* label, const Dtype* center, Dtype* distance) {
CUDA_KERNEL_LOOP(index, nthreads) {
int m = index / K;
int k = index % K;
const int label_value = static_cast<int>(label[m]);
// distance(i) = x(i) - c_{y(i)}
distance[index] = bottom[index] - center[label_value * K + k];
}
}
template <typename Dtype>
__global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K,
const Dtype* label, const Dtype* distance, Dtype* variation_sum,
Dtype* center_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int count = 0;
for (int m = 0; m < M; m++) {
const int label_value = static_cast<int>(label[m]);
if (label_value == index) {
count++;
for (int k = 0; k < K; k++) {
variation_sum[index * K + k] -= distance[m * K + k];
}
}
}
for (int k = 0; k < K; k++) {
center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.);
}
}
}
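// For each center j this computes the center-loss update
//   center_diff[j] = sum_{i : y_i == j} (c_j - x_i) / (1 + count_j),
// since variation_sum accumulates -distance = c_{y_i} - x_i and is then divided by count + 1.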
template <typename Dtype>
void CenterLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int nthreads = M_ * K_;
hipLaunchKernelGGL(( Compute_distance_data_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data());
Dtype dot;
caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot);
Dtype loss = dot / M_ / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void CenterLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int nthreads = N_;
	caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_gpu_data());
	hipLaunchKernelGGL(( Compute_center_diff_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
	dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(),
	variation_sum_.mutable_gpu_data(), this->blobs_[0]->mutable_gpu_diff());
if (propagate_down[0]) {
caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_,
distance_.gpu_data(), bottom[0]->mutable_gpu_diff());
}
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer);
} // namespace caffe
| b7721266313881a4d873c0213586fdf13a045964.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/loss_layers.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom,
const Dtype* label, const Dtype* center, Dtype* distance) {
CUDA_KERNEL_LOOP(index, nthreads) {
int m = index / K;
int k = index % K;
const int label_value = static_cast<int>(label[m]);
// distance(i) = x(i) - c_{y(i)}
distance[index] = bottom[index] - center[label_value * K + k];
}
}
template <typename Dtype>
__global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K,
const Dtype* label, const Dtype* distance, Dtype* variation_sum,
Dtype* center_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int count = 0;
for (int m = 0; m < M; m++) {
const int label_value = static_cast<int>(label[m]);
if (label_value == index) {
count++;
for (int k = 0; k < K; k++) {
variation_sum[index * K + k] -= distance[m * K + k];
}
}
}
for (int k = 0; k < K; k++) {
center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.);
}
}
}
template <typename Dtype>
void CenterLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int nthreads = M_ * K_;
Compute_distance_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data());
Dtype dot;
caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot);
Dtype loss = dot / M_ / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void CenterLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int nthreads = N_;
	caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_gpu_data());
	Compute_center_diff_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
	CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(),
	variation_sum_.mutable_gpu_data(), this->blobs_[0]->mutable_gpu_diff());
if (propagate_down[0]) {
caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_,
distance_.gpu_data(), bottom[0]->mutable_gpu_diff());
}
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer);
} // namespace caffe
|
bd18d3f83b9295d3c49d554b6c7e1e4a121dc803.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2013. The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-03-24 Martin Uecker <[email protected]>
*
*
* This file defines basic operations on vectors of floats/complex floats
* for operations on the GPU. See the CPU version (vecops.c) for more
* information.
*/
#include <stdio.h>
#include <stdbool.h>
#include <assert.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include "num/gpukrnls.h"
#if 1
// see Dara's src/calib/calibcu.cu for how to get
// runtime info
// limited by hardware to 1024 on most devices
// should be a multiple of 32 (warp size)
#define BLOCKSIZE 1024
static int blocksize(int N)
{
return BLOCKSIZE;
}
static int gridsize(int N)
{
return (N + BLOCKSIZE - 1) / BLOCKSIZE;
}
#else
// http://stackoverflow.com/questions/5810447/cuda-block-and-grid-size-efficiencies
#define WARPSIZE 32
#define MAXBLOCKS (16 * 8)
// 16 multi processor times 8 blocks
#define MIN(x, y) ((x < y) ? (x) : (y))
#define MAX(x, y) ((x > y) ? (x) : (y))
static int blocksize(int N)
{
int warps_total = (N + WARPSIZE - 1) / WARPSIZE;
int warps_block = MAX(1, MIN(4, warps_total));
return WARPSIZE * warps_block;
}
static int gridsize(int N)
{
int warps_total = (N + WARPSIZE - 1) / WARPSIZE;
int warps_block = MAX(1, MIN(4, warps_total));
return MIN(MAXBLOCKS, MAX(1, warps_total / warps_block));
}
#endif
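// All kernels below are launched as
//   hipLaunchKernelGGL(kern_xxx, dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, ...);
// and use a grid-stride loop (start = global thread id, stride = blockDim.x * gridDim.x),
// so they stay correct for any N even when the grid does not cover all elements exactly.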
__global__ void kern_float2double(int N, double* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src[i];
}
extern "C" void cuda_float2double(long N, double* dst, const float* src)
{
hipLaunchKernelGGL(( kern_float2double), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src);
}
__global__ void kern_double2float(int N, float* dst, const double* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src[i];
}
extern "C" void cuda_double2float(long N, float* dst, const double* src)
{
hipLaunchKernelGGL(( kern_double2float), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src);
}
__global__ void kern_xpay(int N, float beta, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = dst[i] * beta + src[i];
}
extern "C" void cuda_xpay(long N, float beta, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_xpay), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, beta, dst, src);
}
__global__ void kern_smul(int N, float alpha, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = alpha * src[i];
}
extern "C" void cuda_smul(long N, float alpha, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_smul), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, alpha, dst, src);
}
typedef void (*cuda_3op_f)(int N, float* dst, const float* src1, const float* src2);
extern "C" void cuda_3op(cuda_3op_f krn, int N, float* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( krn), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
__global__ void kern_add(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src1[i] + src2[i];
}
extern "C" void cuda_add(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_add, N, dst, src1, src2);
}
__global__ void kern_sub(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src1[i] - src2[i];
}
extern "C" void cuda_sub(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_sub, N, dst, src1, src2);
}
__global__ void kern_mul(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src1[i] * src2[i];
}
extern "C" void cuda_mul(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_mul, N, dst, src1, src2);
}
__global__ void kern_div(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src1[i] / src2[i];
}
extern "C" void cuda_div(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_div, N, dst, src1, src2);
}
__global__ void kern_fmac(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] += src1[i] * src2[i];
}
extern "C" void cuda_fmac(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_fmac, N, dst, src1, src2);
}
__global__ void kern_fmac2(int N, double* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] += src1[i] * src2[i];
}
extern "C" void cuda_fmac2(long N, double* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( kern_fmac2), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
__global__ void kern_zmul(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], src2[i]);
}
extern "C" void cuda_zmul(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zmul), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zdiv(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCdivf(src1[i], src2[i]);
}
extern "C" void cuda_zdiv(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zdiv), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmac(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCaddf(dst[i], cuCmulf(src1[i], src2[i]));
}
extern "C" void cuda_zfmac(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zfmac), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmac2(int N, hipDoubleComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCadd(dst[i], cuComplexFloatToDouble(cuCmulf(src1[i], src2[i])));
}
extern "C" void cuda_zfmac2(long N, _Complex double* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zfmac2), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (hipDoubleComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zmulc(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], cuConjf(src2[i]));
}
extern "C" void cuda_zmulc(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zmulc), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCaddf(dst[i], cuCmulf(src1[i], cuConjf(src2[i])));
}
extern "C" void cuda_zfmacc(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zfmacc), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc2(int N, hipDoubleComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCadd(dst[i], cuComplexFloatToDouble(cuCmulf(src1[i], cuConjf(src2[i]))));
}
extern "C" void cuda_zfmacc2(long N, _Complex double* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zfmacc2), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (hipDoubleComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_pow(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = powf(src1[i], src2[i]);
}
extern "C" void cuda_pow(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_pow, N, dst, src1, src2);
}
__device__ hipDoubleComplex zexpD(hipDoubleComplex x)
{
double sc = exp(cuCreal(x));
double si;
double co;
sincos(cuCimag(x), &si, &co);
return make_cuDoubleComplex(sc * co, sc * si);
}
__device__ cuFloatComplex zexp(cuFloatComplex x)
{
float sc = expf(cuCrealf(x));
float si;
float co;
sincosf(cuCimagf(x), &si, &co);
return make_cuFloatComplex(sc * co, sc * si);
}
__device__ float zarg(cuFloatComplex x)
{
return atan2(cuCimagf(x), cuCrealf(x));
}
__device__ cuFloatComplex zlog(cuFloatComplex x)
{
return make_cuFloatComplex(log(cuCabsf(x)), zarg(x));
}
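// zlog returns the principal branch: log|x| + i * arg(x), with arg(x) = atan2(Im x, Re x).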
// x^y = e^{y ln(x)}
__device__ cuFloatComplex zpow(cuFloatComplex x, cuFloatComplex y)
{
return zexp(cuCmulf(y, zlog(x)));
}
__global__ void kern_zpow(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = zpow(src1[i], src2[i]);
}
extern "C" void cuda_zpow(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zpow), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_sqrt(int N, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = sqrtf(fabs(src[i]));
}
extern "C" void cuda_sqrt(long N, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_sqrt), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src);
}
__global__ void kern_zconj(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuConjf(src[i]);
}
extern "C" void cuda_zconj(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zconj), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zcmp(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(((cuCrealf(src1[i]) == cuCrealf(src2[i])) && (cuCimagf(src1[i]) == cuCimagf(src2[i]))) ? 1. : 0, 0.);
}
extern "C" void cuda_zcmp(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zcmp), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zdiv_reg(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2, cuFloatComplex lambda)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCdivf(src1[i], cuCaddf(src2[i], lambda));
}
extern "C" void cuda_zdiv_reg(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2, _Complex float lambda)
{
hipLaunchKernelGGL(( kern_zdiv_reg), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2, make_cuFloatComplex(__real(lambda), __imag(lambda)));
}
__global__ void kern_zphsr(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float abs = cuCabsf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
dst[i] = (0. == abs) ? make_cuFloatComplex(1., 0.) : (cuCdivf(src[i], make_cuFloatComplex(abs, 0.)));
}
}
extern "C" void cuda_zphsr(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zphsr), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zexpj(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float abs = cuCabsf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
dst[i] = zexp(make_cuFloatComplex(0., abs));
}
}
extern "C" void cuda_zexpj(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zexpj), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zarg(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(zarg(src[i]), 0.);
}
extern "C" void cuda_zarg(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zarg), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
/**
 * (GPU) Step (1) of soft thresholding, y = ST(x, lambda).
 * Only computes the residual, resid = MAX((abs(x) - lambda) / abs(x), 0).
*
* @param N number of elements
* @param lambda threshold parameter
* @param d pointer to destination, resid
* @param x pointer to input
*/
__global__ void kern_zsoftthresh_half(int N, float lambda, cuFloatComplex* d, const cuFloatComplex* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float norm = cuCabsf(x[i]);
float red = norm - lambda;
//d[i] = (red > 0.) ? (cuCmulf(make_cuFloatComplex(red / norm, 0.), x[i])) : make_cuFloatComplex(0., 0.);
d[i] = (red > 0.) ? make_cuFloatComplex(red / norm, 0.) : make_cuFloatComplex(0., 0.);
}
}
extern "C" void cuda_zsoftthresh_half(long N, float lambda, _Complex float* d, const _Complex float* x)
{
hipLaunchKernelGGL(( kern_zsoftthresh_half), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, lambda, (cuFloatComplex*)d, (const cuFloatComplex*)x);
}
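// kern_zsoftthresh below computes the full soft threshold y = x * max(1 - lambda/|x|, 0),
// whereas the *_half variant above only returns the real-valued scale factor max(1 - lambda/|x|, 0).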
__global__ void kern_zsoftthresh(int N, float lambda, cuFloatComplex* d, const cuFloatComplex* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float norm = cuCabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (cuCmulf(make_cuFloatComplex(red / norm, 0.), x[i])) : make_cuFloatComplex(0., 0.);
}
}
extern "C" void cuda_zsoftthresh(long N, float lambda, _Complex float* d, const _Complex float* x)
{
hipLaunchKernelGGL(( kern_zsoftthresh), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, lambda, (cuFloatComplex*)d, (const cuFloatComplex*)x);
}
__global__ void kern_softthresh_half(int N, float lambda, float* d, const float* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float norm = fabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm) : 0.;
}
}
extern "C" void cuda_softthresh_half(long N, float lambda, float* d, const float* x)
{
hipLaunchKernelGGL(( kern_softthresh_half), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, lambda, d, x);
}
__global__ void kern_softthresh(int N, float lambda, float* d, const float* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float norm = fabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm * x[i]) : 0.;
}
}
extern "C" void cuda_softthresh(long N, float lambda, float* d, const float* x)
{
hipLaunchKernelGGL(( kern_softthresh), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, lambda, d, x);
}
__global__ void kern_zreal(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(cuCrealf(src[i]), 0.);
}
extern "C" void cuda_zreal(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zreal), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_le(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = (src1[i] <= src2[i]);
}
extern "C" void cuda_le(long N, float* dst, const float* src1, const float* src2)
{
	hipLaunchKernelGGL(( kern_le), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
__device__ cuFloatComplex cuDouble2Float(hipDoubleComplex x)
{
return make_cuFloatComplex(cuCreal(x), cuCimag(x));
}
__device__ hipDoubleComplex cuFloat2Double(cuFloatComplex x)
{
return make_cuDoubleComplex(cuCrealf(x), cuCimagf(x));
}
// identical copy in num/fft.c
__device__ double fftmod_phase(long length, int j)
{
long center1 = length / 2;
double shift = (double)center1 / (double)length;
return ((double)j - (double)center1 / 2.) * shift;
}
__device__ hipDoubleComplex fftmod_phase2(long n, int j, bool inv, double phase)
{
phase += fftmod_phase(n, j);
double rem = phase - floor(phase);
double sgn = inv ? -1. : 1.;
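	// quarter-period phases (rem = 0, 1/4, 1/2, 3/4) are returned exactly below
	// to avoid rounding error from the complex exponential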
#if 1
if (rem == 0.)
return make_cuDoubleComplex(1., 0.);
if (rem == 0.5)
return make_cuDoubleComplex(-1., 0.);
if (rem == 0.25)
return make_cuDoubleComplex(0., sgn);
if (rem == 0.75)
return make_cuDoubleComplex(0., -sgn);
#endif
return zexpD(make_cuDoubleComplex(0., M_PI * 2. * sgn * rem));
}
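// apply the fftmod phase along the innermost dimension: each thread handles a
// contiguous chunk of n samples, scaling by fftmod_phase2() in double precision
// before converting back to single precision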
__global__ void kern_zfftmod(int N, cuFloatComplex* dst, const cuFloatComplex* src, unsigned int n, _Bool inv, double phase)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
for (int j = 0; j < n; j++)
dst[i * n + j] = cuDouble2Float(cuCmul(fftmod_phase2(n, j, inv, phase),
cuFloat2Double(src[i * n + j])));
}
extern "C" void cuda_zfftmod(long N, _Complex float* dst, const _Complex float* src, unsigned int n, _Bool inv, double phase)
{
hipLaunchKernelGGL(( kern_zfftmod), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src, n, inv, phase);
}
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
__global__ void kern_max(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = MAX(src1[i], src2[i]);
}
extern "C" void cuda_max(long N, float* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( kern_max), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
__global__ void kern_min(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = MIN(src1[i], src2[i]);
}
extern "C" void cuda_min(long N, float* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( kern_min), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
| bd18d3f83b9295d3c49d554b6c7e1e4a121dc803.cu | /* Copyright 2013. The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-03-24 Martin Uecker <[email protected]>
*
*
* This file defines basic operations on vectors of floats/complex floats
* for operations on the GPU. See the CPU version (vecops.c) for more
* information.
*/
#include <stdio.h>
#include <stdbool.h>
#include <assert.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cuComplex.h>
#include "num/gpukrnls.h"
#if 1
// see Dara's src/calib/calibcu.cu for how to get
// runtime info
// limited by hardware to 1024 on most devices
// should be a multiple of 32 (warp size)
#define BLOCKSIZE 1024
static int blocksize(int N)
{
return BLOCKSIZE;
}
static int gridsize(int N)
{
return (N + BLOCKSIZE - 1) / BLOCKSIZE;
}
#else
// http://stackoverflow.com/questions/5810447/cuda-block-and-grid-size-efficiencies
#define WARPSIZE 32
#define MAXBLOCKS (16 * 8)
// 16 multi processor times 8 blocks
#define MIN(x, y) ((x < y) ? (x) : (y))
#define MAX(x, y) ((x > y) ? (x) : (y))
static int blocksize(int N)
{
int warps_total = (N + WARPSIZE - 1) / WARPSIZE;
int warps_block = MAX(1, MIN(4, warps_total));
return WARPSIZE * warps_block;
}
static int gridsize(int N)
{
int warps_total = (N + WARPSIZE - 1) / WARPSIZE;
int warps_block = MAX(1, MIN(4, warps_total));
return MIN(MAXBLOCKS, MAX(1, warps_total / warps_block));
}
#endif
__global__ void kern_float2double(int N, double* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src[i];
}
extern "C" void cuda_float2double(long N, double* dst, const float* src)
{
kern_float2double<<<gridsize(N), blocksize(N)>>>(N, dst, src);
}
__global__ void kern_double2float(int N, float* dst, const double* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src[i];
}
extern "C" void cuda_double2float(long N, float* dst, const double* src)
{
kern_double2float<<<gridsize(N), blocksize(N)>>>(N, dst, src);
}
__global__ void kern_xpay(int N, float beta, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = dst[i] * beta + src[i];
}
extern "C" void cuda_xpay(long N, float beta, float* dst, const float* src)
{
kern_xpay<<<gridsize(N), blocksize(N)>>>(N, beta, dst, src);
}
__global__ void kern_smul(int N, float alpha, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = alpha * src[i];
}
extern "C" void cuda_smul(long N, float alpha, float* dst, const float* src)
{
kern_smul<<<gridsize(N), blocksize(N)>>>(N, alpha, dst, src);
}
typedef void (*cuda_3op_f)(int N, float* dst, const float* src1, const float* src2);
extern "C" void cuda_3op(cuda_3op_f krn, int N, float* dst, const float* src1, const float* src2)
{
krn<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__global__ void kern_add(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src1[i] + src2[i];
}
extern "C" void cuda_add(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_add, N, dst, src1, src2);
}
__global__ void kern_sub(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src1[i] - src2[i];
}
extern "C" void cuda_sub(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_sub, N, dst, src1, src2);
}
__global__ void kern_mul(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src1[i] * src2[i];
}
extern "C" void cuda_mul(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_mul, N, dst, src1, src2);
}
__global__ void kern_div(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = src1[i] / src2[i];
}
extern "C" void cuda_div(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_div, N, dst, src1, src2);
}
__global__ void kern_fmac(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] += src1[i] * src2[i];
}
extern "C" void cuda_fmac(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_fmac, N, dst, src1, src2);
}
__global__ void kern_fmac2(int N, double* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] += src1[i] * src2[i];
}
extern "C" void cuda_fmac2(long N, double* dst, const float* src1, const float* src2)
{
kern_fmac2<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__global__ void kern_zmul(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], src2[i]);
}
extern "C" void cuda_zmul(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zmul<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zdiv(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCdivf(src1[i], src2[i]);
}
extern "C" void cuda_zdiv(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zdiv<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmac(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCaddf(dst[i], cuCmulf(src1[i], src2[i]));
}
extern "C" void cuda_zfmac(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zfmac<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmac2(int N, cuDoubleComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCadd(dst[i], cuComplexFloatToDouble(cuCmulf(src1[i], src2[i])));
}
extern "C" void cuda_zfmac2(long N, _Complex double* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zfmac2<<<gridsize(N), blocksize(N)>>>(N, (cuDoubleComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zmulc(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], cuConjf(src2[i]));
}
extern "C" void cuda_zmulc(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zmulc<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCaddf(dst[i], cuCmulf(src1[i], cuConjf(src2[i])));
}
extern "C" void cuda_zfmacc(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zfmacc<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc2(int N, cuDoubleComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCadd(dst[i], cuComplexFloatToDouble(cuCmulf(src1[i], cuConjf(src2[i]))));
}
extern "C" void cuda_zfmacc2(long N, _Complex double* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zfmacc2<<<gridsize(N), blocksize(N)>>>(N, (cuDoubleComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_pow(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = powf(src1[i], src2[i]);
}
extern "C" void cuda_pow(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_pow, N, dst, src1, src2);
}
__device__ cuDoubleComplex zexpD(cuDoubleComplex x)
{
double sc = exp(cuCreal(x));
double si;
double co;
sincos(cuCimag(x), &si, &co);
return make_cuDoubleComplex(sc * co, sc * si);
}
__device__ cuFloatComplex zexp(cuFloatComplex x)
{
float sc = expf(cuCrealf(x));
float si;
float co;
sincosf(cuCimagf(x), &si, &co);
return make_cuFloatComplex(sc * co, sc * si);
}
__device__ float zarg(cuFloatComplex x)
{
return atan2(cuCimagf(x), cuCrealf(x));
}
__device__ cuFloatComplex zlog(cuFloatComplex x)
{
return make_cuFloatComplex(log(cuCabsf(x)), zarg(x));
}
// x^y = e^{y ln(x)} = e^{y (ln|x| + i arg(x))}
__device__ cuFloatComplex zpow(cuFloatComplex x, cuFloatComplex y)
{
return zexp(cuCmulf(y, zlog(x)));
}
__global__ void kern_zpow(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = zpow(src1[i], src2[i]);
}
extern "C" void cuda_zpow(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zpow<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_sqrt(int N, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = sqrtf(fabs(src[i]));
}
extern "C" void cuda_sqrt(long N, float* dst, const float* src)
{
kern_sqrt<<<gridsize(N), blocksize(N)>>>(N, dst, src);
}
__global__ void kern_zconj(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuConjf(src[i]);
}
extern "C" void cuda_zconj(long N, _Complex float* dst, const _Complex float* src)
{
kern_zconj<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zcmp(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(((cuCrealf(src1[i]) == cuCrealf(src2[i])) && (cuCimagf(src1[i]) == cuCimagf(src2[i]))) ? 1. : 0, 0.);
}
extern "C" void cuda_zcmp(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zcmp<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zdiv_reg(int N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2, cuFloatComplex lambda)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCdivf(src1[i], cuCaddf(src2[i], lambda));
}
extern "C" void cuda_zdiv_reg(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2, _Complex float lambda)
{
kern_zdiv_reg<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2, make_cuFloatComplex(__real(lambda), __imag(lambda)));
}
__global__ void kern_zphsr(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float abs = cuCabsf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
dst[i] = (0. == abs) ? make_cuFloatComplex(1., 0.) : (cuCdivf(src[i], make_cuFloatComplex(abs, 0.)));
}
}
extern "C" void cuda_zphsr(long N, _Complex float* dst, const _Complex float* src)
{
kern_zphsr<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zexpj(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float abs = cuCabsf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
dst[i] = zexp(make_cuFloatComplex(0., abs));
}
}
extern "C" void cuda_zexpj(long N, _Complex float* dst, const _Complex float* src)
{
kern_zexpj<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zarg(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(zarg(src[i]), 0.);
}
extern "C" void cuda_zarg(long N, _Complex float* dst, const _Complex float* src)
{
kern_zarg<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
/**
 * (GPU) Step (1) of soft thresholding, y = ST(x, lambda).
 * Only computes the residual, resid = MAX((abs(x) - lambda)/abs(x), 0)
*
* @param N number of elements
* @param lambda threshold parameter
* @param d pointer to destination, resid
* @param x pointer to input
*/
__global__ void kern_zsoftthresh_half(int N, float lambda, cuFloatComplex* d, const cuFloatComplex* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float norm = cuCabsf(x[i]);
float red = norm - lambda;
//d[i] = (red > 0.) ? (cuCmulf(make_cuFloatComplex(red / norm, 0.), x[i])) : make_cuFloatComplex(0., 0.);
d[i] = (red > 0.) ? make_cuFloatComplex(red / norm, 0.) : make_cuFloatComplex(0., 0.);
}
}
extern "C" void cuda_zsoftthresh_half(long N, float lambda, _Complex float* d, const _Complex float* x)
{
kern_zsoftthresh_half<<<gridsize(N), blocksize(N)>>>(N, lambda, (cuFloatComplex*)d, (const cuFloatComplex*)x);
}
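/*
 * Usage sketch (host side): the residual written by cuda_zsoftthresh_half can be
 * combined with x via the elementwise complex multiply in this file, e.g.
 *
 *	cuda_zsoftthresh_half(N, lambda, resid, x);	// resid = max(1 - lambda/|x|, 0)
 *	cuda_zmul(N, y, resid, x);			// y = ST(x, lambda)
 *
 * where y and resid are caller-provided device buffers. The kernel below
 * computes the full soft threshold y = ST(x, lambda) in a single pass.
 */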
__global__ void kern_zsoftthresh(int N, float lambda, cuFloatComplex* d, const cuFloatComplex* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float norm = cuCabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (cuCmulf(make_cuFloatComplex(red / norm, 0.), x[i])) : make_cuFloatComplex(0., 0.);
}
}
extern "C" void cuda_zsoftthresh(long N, float lambda, _Complex float* d, const _Complex float* x)
{
kern_zsoftthresh<<<gridsize(N), blocksize(N)>>>(N, lambda, (cuFloatComplex*)d, (const cuFloatComplex*)x);
}
__global__ void kern_softthresh_half(int N, float lambda, float* d, const float* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float norm = fabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm) : 0.;
}
}
extern "C" void cuda_softthresh_half(long N, float lambda, float* d, const float* x)
{
kern_softthresh_half<<<gridsize(N), blocksize(N)>>>(N, lambda, d, x);
}
__global__ void kern_softthresh(int N, float lambda, float* d, const float* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride) {
float norm = fabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm * x[i]) : 0.;
}
}
extern "C" void cuda_softthresh(long N, float lambda, float* d, const float* x)
{
kern_softthresh<<<gridsize(N), blocksize(N)>>>(N, lambda, d, x);
}
__global__ void kern_zreal(int N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(cuCrealf(src[i]), 0.);
}
extern "C" void cuda_zreal(long N, _Complex float* dst, const _Complex float* src)
{
kern_zreal<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_le(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = (src1[i] <= src2[i]);
}
extern "C" void cuda_le(long N, float* dst, const float* src1, const float* src2)
{
	kern_le<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__device__ cuFloatComplex cuDouble2Float(cuDoubleComplex x)
{
return make_cuFloatComplex(cuCreal(x), cuCimag(x));
}
__device__ cuDoubleComplex cuFloat2Double(cuFloatComplex x)
{
return make_cuDoubleComplex(cuCrealf(x), cuCimagf(x));
}
// identical copy in num/fft.c
__device__ double fftmod_phase(long length, int j)
{
long center1 = length / 2;
double shift = (double)center1 / (double)length;
return ((double)j - (double)center1 / 2.) * shift;
}
__device__ cuDoubleComplex fftmod_phase2(long n, int j, bool inv, double phase)
{
phase += fftmod_phase(n, j);
double rem = phase - floor(phase);
double sgn = inv ? -1. : 1.;
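	// quarter-period phases (rem = 0, 1/4, 1/2, 3/4) are returned exactly below
	// to avoid rounding error from the complex exponential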
#if 1
if (rem == 0.)
return make_cuDoubleComplex(1., 0.);
if (rem == 0.5)
return make_cuDoubleComplex(-1., 0.);
if (rem == 0.25)
return make_cuDoubleComplex(0., sgn);
if (rem == 0.75)
return make_cuDoubleComplex(0., -sgn);
#endif
return zexpD(make_cuDoubleComplex(0., M_PI * 2. * sgn * rem));
}
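// apply the fftmod phase along the innermost dimension: each thread handles a
// contiguous chunk of n samples, scaling by fftmod_phase2() in double precision
// before converting back to single precision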
__global__ void kern_zfftmod(int N, cuFloatComplex* dst, const cuFloatComplex* src, unsigned int n, _Bool inv, double phase)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
for (int j = 0; j < n; j++)
dst[i * n + j] = cuDouble2Float(cuCmul(fftmod_phase2(n, j, inv, phase),
cuFloat2Double(src[i * n + j])));
}
extern "C" void cuda_zfftmod(long N, _Complex float* dst, const _Complex float* src, unsigned int n, _Bool inv, double phase)
{
kern_zfftmod<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src, n, inv, phase);
}
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
__global__ void kern_max(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = MAX(src1[i], src2[i]);
}
extern "C" void cuda_max(long N, float* dst, const float* src1, const float* src2)
{
kern_max<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__global__ void kern_min(int N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = MIN(src1[i], src2[i]);
}
extern "C" void cuda_min(long N, float* dst, const float* src1, const float* src2)
{
kern_min<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
|
05823d707604b265c01069390cb27d53f47862b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include "inc.h"
float * GenVectors(int numVectors, int numDims){
srand(10);
float * buf=(float*)malloc(sizeof(float)*numVectors*numDims);
for(int i=0;i<numVectors*numDims;i++)
buf[i]=rand()%100;
return buf;
}
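// one block per source vector (blockIdx.x); its threads stride over all num
// vectors and write |cos(a,b)| = sqrt((a.b)^2 / ((a.a)(b.b))) to tgt[bx*num + i]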
__global__ void
similarity_kernel( float* src, float* tgt, int num, int num_dims)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int bd = blockDim.x;
for (int i = tx; i < num; i+=bd) {
int idx = bx * num + i;
const float * a=src + bx*num_dims;
const float * b=src + i*num_dims;
float ab=0;
float aa=0;
float bb=0;
float ai,bi;
		for(int d=0;d<num_dims;d++){
			ai=a[d];
			bi=b[d];
ab+=ai*bi;
aa+=ai*ai;
bb+=bi*bi;
}
tgt[idx]=sqrt((ab*ab)/(aa*bb));
}
}
int
main(int argc, char** argv)
{
hipSetDevice( 0 );
double t1=get_time();
int num_points;
int num_dims;
if(!get_opt(argc,argv,"np", num_points) || !get_opt(argc,argv,"nd",num_dims)){
return 1;
}
float * points=GenVectors(num_points, num_dims);
unsigned int mem_size = sizeof(float)*num_points*num_dims;
unsigned int mem_size_tgt = sizeof(float) * num_points * num_points;
float* d_src;
cutilSafeCall(hipMalloc((void**) &d_src, mem_size));
cutilSafeCall(hipMemcpy(d_src, points, mem_size,
hipMemcpyHostToDevice) );
float* result = (float *)malloc(mem_size_tgt);
float* d_tgt;
cutilSafeCall(hipMalloc((void**) &d_tgt, mem_size_tgt));
double t2=get_time();
printf("init: %lf\n", t2-t1);
dim3 threads(128);
dim3 grid(num_points);
hipLaunchKernelGGL(( similarity_kernel), dim3(grid), dim3(threads) , 0, 0, d_src, d_tgt, num_points, num_dims);
double t3=get_time();
printf("computation time: %lf\n", t3-t2);
cutilSafeCall(hipMemcpy(result, d_tgt, mem_size_tgt,
hipMemcpyDeviceToHost) );
return 0;
}
| 05823d707604b265c01069390cb27d53f47862b8.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include "inc.h"
float * GenVectors(int numVectors, int numDims){
srand(10);
float * buf=(float*)malloc(sizeof(float)*numVectors*numDims);
for(int i=0;i<numVectors*numDims;i++)
buf[i]=rand()%100;
return buf;
}
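// one block per source vector (blockIdx.x); its threads stride over all num
// vectors and write |cos(a,b)| = sqrt((a.b)^2 / ((a.a)(b.b))) to tgt[bx*num + i]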
__global__ void
similarity_kernel( float* src, float* tgt, int num, int num_dims)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int bd = blockDim.x;
for (int i = tx; i < num; i+=bd) {
int idx = bx * num + i;
const float * a=src + bx*num_dims;
const float * b=src + i*num_dims;
float ab=0;
float aa=0;
float bb=0;
float ai,bi;
		for(int d=0;d<num_dims;d++){
			ai=a[d];
			bi=b[d];
ab+=ai*bi;
aa+=ai*ai;
bb+=bi*bi;
}
tgt[idx]=sqrt((ab*ab)/(aa*bb));
}
}
int
main(int argc, char** argv)
{
cudaSetDevice( 0 );
double t1=get_time();
int num_points;
int num_dims;
if(!get_opt(argc,argv,"np", num_points) || !get_opt(argc,argv,"nd",num_dims)){
return 1;
}
float * points=GenVectors(num_points, num_dims);
unsigned int mem_size = sizeof(float)*num_points*num_dims;
unsigned int mem_size_tgt = sizeof(float) * num_points * num_points;
float* d_src;
cutilSafeCall(cudaMalloc((void**) &d_src, mem_size));
cutilSafeCall(cudaMemcpy(d_src, points, mem_size,
cudaMemcpyHostToDevice) );
float* result = (float *)malloc(mem_size_tgt);
float* d_tgt;
cutilSafeCall(cudaMalloc((void**) &d_tgt, mem_size_tgt));
double t2=get_time();
printf("init: %lf\n", t2-t1);
dim3 threads(128);
dim3 grid(num_points);
similarity_kernel<<< grid, threads >>>(d_src, d_tgt, num_points, num_dims);
double t3=get_time();
printf("computation time: %lf\n", t3-t2);
cutilSafeCall(cudaMemcpy(result, d_tgt, mem_size_tgt,
cudaMemcpyDeviceToHost) );
return 0;
}
|
5801d173e995df67d8bf14453427b20ddfe1b662.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Authors:
* Oren Freifeld, [email protected]
* Yixin Li, Email: [email protected]
*/
__global__ void clear_fields(int * count, double * log_count,
int * mu_i_h, int * mu_s_h, double * mu_i, double * mu_s, unsigned long long int * sigma_s_h,
const int dim_i, const int nsuperpixel){
int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
count[k] = 0;
log_count[k] = 0.0;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i_h[dim_i*k+d] = 0;
mu_i[dim_i*k+d] = 0;
}
mu_s_h[2*k] = mu_s_h[2*k+1] = 0;
mu_s[2*k] = mu_s[2*k+1] = 0;
sigma_s_h[3*k] = sigma_s_h[3*k+1] = sigma_s_h[3*k+2] = 0;
}
__global__ void sum_by_label(
double * img, int * seg,
int * count, int * mu_i_h, int * mu_s_h, unsigned long long int * sigma_s_h,
const int xdim, const int ydim, const int dim_i, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t>=nPts) return;
//get the label
const int k = seg[t];
atomicAdd(&count[k] , 1);
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
atomicAdd(&mu_i_h[dim_i*k+d], img[dim_i*t+d]);
}
int x = t % xdim;
int y = t / xdim;
int xx = x * x;
int xy = x * y;
int yy = y * y;
atomicAdd(&mu_s_h[2*k], x);
atomicAdd(&mu_s_h[2*k+1], y);
atomicAdd(&sigma_s_h[3*k], xx);
atomicAdd(&sigma_s_h[3*k+1], xy);
atomicAdd(&sigma_s_h[3*k+2], yy);
}
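// sigma_s_h holds raw second moments (sums of x*x, x*y, y*y); the kernel below
// forms the 2x2 spatial covariance by subtracting count*mu*mu^T, blending in the
// prior scatter, and normalizing, then stores its inverse (J_s) and log-determinant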
__global__ void calculate_mu_and_sigma(
int * counts, double* log_count, int * mu_i_h, int * mu_s_h,
double * mu_i, double * mu_s,
unsigned long long int * sigma_s_h, double * prior_sigma_s,
double * sigma_s, double * logdet_Sigma_s, double * J_s,
const int prior_count, const int dim_i, const int nsuperpixel)
{
const int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
double count = double (counts[k]);
double mu_x = 0.0;
double mu_y = 0.0;
//calculate the mean
if (count>0){
//X[k] /= count
log_count[k] = log(count);
mu_x = mu_s_h[2*k] / count;
mu_y = mu_s_h[2*k+1]/ count;
mu_s[2*k] = mu_x;
mu_s[2*k+1] = mu_y;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i[dim_i*k+d] = mu_i_h[dim_i*k+d]/count;
}
}
//calculate the covariance
double C00,C01,C11;
C00 = C01 = C11 = 0;
int total_count = counts[k] + prior_count;
if (count > 3){
//update cumulative count and covariance
C00= sigma_s_h[3*k] - mu_x * mu_x * count;
C01= sigma_s_h[3*k+1] - mu_x * mu_y * count;
C11= sigma_s_h[3*k+2] - mu_y * mu_y * count;
}
C00 = (prior_sigma_s[k*4] + C00) / (double(total_count) - 3);
C01 = (prior_sigma_s[k*4+1] + C01)/ (double(total_count) - 3);
C11 = (prior_sigma_s[k*4+3] + C11) / (double(total_count) - 3);
double detC = C00 * C11 - C01 * C01;
if (detC <= 0){
C00 = C00 + 0.00001;
C11 = C11 + 0.00001;
detC = C00*C11-C01*C01;
if(detC <=0) detC = 0.0001;//hack
}
//set the sigma_space
sigma_s[k*4] = C00;
sigma_s[k*4+1] = C01;
sigma_s[k*4+2] = C01;
sigma_s[k*4+3] = C11;
//Take the inverse of sigma_space to get J_space
J_s[k*4] = C11 / detC;
J_s[k*4+1] = -C01/ detC;
J_s[k*4+2] = -C01/ detC;
J_s[k*4+3] = C00/ detC;
logdet_Sigma_s[k] = log(detC);
}
__global__ void clear_fields_2(int * count, int * mu_i_h, int * mu_s_h, const int dim_i, const int nsuperpixel){
int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
//clear the fields
count[k] = 0;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i_h[dim_i*k+d] = 0;
}
mu_s_h[2*k] = mu_s_h[2*k+1] = 0;
}
__global__ void sum_by_label_2(double * img, int * seg, int * count, int * mu_i_h, int * mu_s_h,
const int xdim, const int ydim, const int dim_i, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x *blockDim.x;
if (t>=nPts) return;
//get the label
const int k = seg[t];
atomicAdd(&count[k] , 1);
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
atomicAdd(&mu_i_h[dim_i*k+d], img[dim_i*t+d]);
}
atomicAdd(&mu_s_h[2*k], t % xdim);
atomicAdd(&mu_s_h[2*k+1], t / xdim);
}
__global__ void calculate_mu(
int * counts, int * mu_i_h, int * mu_s_h, double * mu_i, double * mu_s,
const int dim_i, const int nsuperpixel)
{
const int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
double count = double (counts[k]);
if (count>0){
mu_s[2*k] = mu_s_h[2*k] / count;
mu_s[2*k+1] = mu_s_h[2*k+1]/ count;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i[dim_i*k+d] = mu_i_h[dim_i*k+d]/count;
}
}
} | 5801d173e995df67d8bf14453427b20ddfe1b662.cu | /*
* Authors:
* Oren Freifeld, [email protected]
* Yixin Li, Email: [email protected]
*/
__global__ void clear_fields(int * count, double * log_count,
int * mu_i_h, int * mu_s_h, double * mu_i, double * mu_s, unsigned long long int * sigma_s_h,
const int dim_i, const int nsuperpixel){
int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
count[k] = 0;
log_count[k] = 0.0;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i_h[dim_i*k+d] = 0;
mu_i[dim_i*k+d] = 0;
}
mu_s_h[2*k] = mu_s_h[2*k+1] = 0;
mu_s[2*k] = mu_s[2*k+1] = 0;
sigma_s_h[3*k] = sigma_s_h[3*k+1] = sigma_s_h[3*k+2] = 0;
}
__global__ void sum_by_label(
double * img, int * seg,
int * count, int * mu_i_h, int * mu_s_h, unsigned long long int * sigma_s_h,
const int xdim, const int ydim, const int dim_i, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t>=nPts) return;
//get the label
const int k = seg[t];
atomicAdd(&count[k] , 1);
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
atomicAdd(&mu_i_h[dim_i*k+d], img[dim_i*t+d]);
}
int x = t % xdim;
int y = t / xdim;
int xx = x * x;
int xy = x * y;
int yy = y * y;
atomicAdd(&mu_s_h[2*k], x);
atomicAdd(&mu_s_h[2*k+1], y);
atomicAdd(&sigma_s_h[3*k], xx);
atomicAdd(&sigma_s_h[3*k+1], xy);
atomicAdd(&sigma_s_h[3*k+2], yy);
}
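// sigma_s_h holds raw second moments (sums of x*x, x*y, y*y); the kernel below
// forms the 2x2 spatial covariance by subtracting count*mu*mu^T, blending in the
// prior scatter, and normalizing, then stores its inverse (J_s) and log-determinant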
__global__ void calculate_mu_and_sigma(
int * counts, double* log_count, int * mu_i_h, int * mu_s_h,
double * mu_i, double * mu_s,
unsigned long long int * sigma_s_h, double * prior_sigma_s,
double * sigma_s, double * logdet_Sigma_s, double * J_s,
const int prior_count, const int dim_i, const int nsuperpixel)
{
const int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
double count = double (counts[k]);
double mu_x = 0.0;
double mu_y = 0.0;
//calculate the mean
if (count>0){
//X[k] /= count
log_count[k] = log(count);
mu_x = mu_s_h[2*k] / count;
mu_y = mu_s_h[2*k+1]/ count;
mu_s[2*k] = mu_x;
mu_s[2*k+1] = mu_y;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i[dim_i*k+d] = mu_i_h[dim_i*k+d]/count;
}
}
//calculate the covariance
double C00,C01,C11;
C00 = C01 = C11 = 0;
int total_count = counts[k] + prior_count;
if (count > 3){
//update cumulative count and covariance
C00= sigma_s_h[3*k] - mu_x * mu_x * count;
C01= sigma_s_h[3*k+1] - mu_x * mu_y * count;
C11= sigma_s_h[3*k+2] - mu_y * mu_y * count;
}
C00 = (prior_sigma_s[k*4] + C00) / (double(total_count) - 3);
C01 = (prior_sigma_s[k*4+1] + C01)/ (double(total_count) - 3);
C11 = (prior_sigma_s[k*4+3] + C11) / (double(total_count) - 3);
double detC = C00 * C11 - C01 * C01;
if (detC <= 0){
C00 = C00 + 0.00001;
C11 = C11 + 0.00001;
detC = C00*C11-C01*C01;
if(detC <=0) detC = 0.0001;//hack
}
//set the sigma_space
sigma_s[k*4] = C00;
sigma_s[k*4+1] = C01;
sigma_s[k*4+2] = C01;
sigma_s[k*4+3] = C11;
//Take the inverse of sigma_space to get J_space
J_s[k*4] = C11 / detC;
J_s[k*4+1] = -C01/ detC;
J_s[k*4+2] = -C01/ detC;
J_s[k*4+3] = C00/ detC;
logdet_Sigma_s[k] = log(detC);
}
__global__ void clear_fields_2(int * count, int * mu_i_h, int * mu_s_h, const int dim_i, const int nsuperpixel){
int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
//clear the fields
count[k] = 0;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i_h[dim_i*k+d] = 0;
}
mu_s_h[2*k] = mu_s_h[2*k+1] = 0;
}
__global__ void sum_by_label_2(double * img, int * seg, int * count, int * mu_i_h, int * mu_s_h,
const int xdim, const int ydim, const int dim_i, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x *blockDim.x;
if (t>=nPts) return;
//get the label
const int k = seg[t];
atomicAdd(&count[k] , 1);
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
atomicAdd(&mu_i_h[dim_i*k+d], img[dim_i*t+d]);
}
atomicAdd(&mu_s_h[2*k], t % xdim);
atomicAdd(&mu_s_h[2*k+1], t / xdim);
}
__global__ void calculate_mu(
int * counts, int * mu_i_h, int * mu_s_h, double * mu_i, double * mu_s,
const int dim_i, const int nsuperpixel)
{
const int k = threadIdx.x + blockIdx.x * blockDim.x; // the label
if (k>=nsuperpixel) return;
double count = double (counts[k]);
if (count>0){
mu_s[2*k] = mu_s_h[2*k] / count;
mu_s[2*k+1] = mu_s_h[2*k+1]/ count;
#pragma unroll
for (int d = 0; d<dim_i; d=d+1){
mu_i[dim_i*k+d] = mu_i_h[dim_i*k+d]/count;
}
}
} |
35325e4f3a3a883c96aecd19ba9e108d92dadc54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void atomic_red(const float *gdata, float *out){
size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < N) atomicAdd(out, gdata[idx]);
} | 35325e4f3a3a883c96aecd19ba9e108d92dadc54.cu | #include "includes.h"
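// note: N is assumed to be defined in includes.h; it is not declared in this file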
__global__ void atomic_red(const float *gdata, float *out){
size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < N) atomicAdd(out, gdata[idx]);
} |
da0d4cc403f5349dd1d1915baea4e3fe7be19fa5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fill.cuh"
#if defined(CUDA)
namespace Cuda {
namespace Kernel {
template <typename T>
__global__ void FillBufferImpl(T* buffer, T value, int64_t size) {
int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
buffer[i] = value;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void FillBuffer(T* buffer, T value, int64_t size, StreamRef stream) {
if (size > 0) {
dim3 numBlocks;
const int blockSize = 128;
numBlocks.x = (size + blockSize - 1) / blockSize;
numBlocks.y = 1;
numBlocks.z = 1;
FillBufferImpl<T> << < numBlocks, blockSize, 0, stream >> > (buffer, value, size);
}
}
#define FILL_BUFFER(Type)\
template void FillBuffer<Type>(Type* buffer, Type value, int64_t size, StreamRef stream);
FILL_BUFFER(float);
FILL_BUFFER(int);
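// usage sketch: Cuda::Kernel::FillBuffer<float>(d_buf, 0.f, n, stream);
// (d_buf, n and stream are caller-provided; only float and int are instantiated here)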
}
}
#endif
| da0d4cc403f5349dd1d1915baea4e3fe7be19fa5.cu | #include "fill.cuh"
#if defined(CUDA)
namespace Cuda {
namespace Kernel {
template <typename T>
__global__ void FillBufferImpl(T* buffer, T value, int64_t size) {
int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size) {
buffer[i] = value;
i += gridDim.x * blockDim.x;
}
}
template <typename T>
void FillBuffer(T* buffer, T value, int64_t size, StreamRef stream) {
if (size > 0) {
dim3 numBlocks;
const int blockSize = 128;
numBlocks.x = (size + blockSize - 1) / blockSize;
numBlocks.y = 1;
numBlocks.z = 1;
FillBufferImpl<T> << < numBlocks, blockSize, 0, stream >> > (buffer, value, size);
}
}
#define FILL_BUFFER(Type)\
template void FillBuffer<Type>(Type* buffer, Type value, int64_t size, StreamRef stream);
FILL_BUFFER(float);
FILL_BUFFER(int);
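// usage sketch: Cuda::Kernel::FillBuffer<float>(d_buf, 0.f, n, stream);
// (d_buf, n and stream are caller-provided; only float and int are instantiated here)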
}
}
#endif
|
3f0a66ccea1c6085224b2d4afdf3e12015471e3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Chain.h>
#include <Config.h>
#include <constants.h>
#include <functions.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
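// after burn-in, accumulate Monte Carlo counts used for posterior probabilities:
// dex[g] counts draws with effectively nonzero alp[g] (differential expression);
// with heterosis, hph/lph/mph count high-, low-, and mid-parent heterosis events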
__global__ void updateProbs(Chain *a, int heterosis){
int g = IDX;
if(a->m > a->burnin){
if(g < a->G){
a->dex[g] += ((a->alp[g] * a->alp[g]) > 1e-6);
if(heterosis){
a->hph[g] += (a->del[g] > fabs(a->alp[g]));
a->lph[g] += (a->del[g] < -fabs(a->alp[g]));
a->mph[g] += (fabs(a->del[g]) > 1e-6);
}
}
}
}
__global__ void updateM(Chain* a){
++a->m;
}
__host__ void printHyper(Chain *host_a, Chain *dev_a, Config *cfg){
char file[BUF];
FILE *fp;
num_t tmp;
if(cfg->hyper){
sprintf(file, "hyper-chain%d.txt", cfg->chainNum);
fp = fopen(file, "a");
if(fp == NULL){
fprintf(stderr, "ERROR: unable to open file, %s\n", file);
exit(EXIT_FAILURE);
}
if(cfg->constSigC){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->sigC), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constD){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->d), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constTau){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->tau), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constThePhi){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->thePhi), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constTheAlp){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->theAlp), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constTheDel){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->theDel), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constSigPhi){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->sigPhi), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constSigAlp){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->sigAlp), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constSigDel){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->sigDel), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constPiAlp){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->piAlp), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constPiDel){
fprintf(fp, ". ");
} else {
CUDA_CALL(hipMemcpy(&(tmp), &(dev_a->piDel), sizeof(num_t), hipMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
fprintf(fp, "\n");
fclose(fp);
}
}
__host__ void printParms(Chain *host_a, Chain *dev_a, Config *cfg){
int n, g, G = cfg->G;
char file[BUF];
FILE *fp;
num_t *tmpv;
if(cfg->parms){
sprintf(file, "parms-chain%d.txt", cfg->chainNum);
fp = fopen(file, "a");
if(fp == NULL){
fprintf(stderr, "ERROR: unable to open file, %s\n", file);
return;
}
tmpv = (num_t*) malloc(cfg->N * cfg->G * sizeof(num_t));
CUDA_CALL(hipMemcpy(tmpv, host_a->c, cfg->N * sizeof(num_t), hipMemcpyDeviceToHost));
for(n = 0; n < cfg->N; ++n){
fprintf(fp, NUM_TF, tmpv[n]);
fprintf(fp, " ");
}
CUDA_CALL(hipMemcpy(tmpv, host_a->phi, cfg->G * sizeof(num_t), hipMemcpyDeviceToHost));
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[g]);
fprintf(fp, " ");
}
CUDA_CALL(hipMemcpy(tmpv, host_a->alp, cfg->G * sizeof(num_t), hipMemcpyDeviceToHost));
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[g]);
fprintf(fp, " ");
}
CUDA_CALL(hipMemcpy(tmpv, host_a->del, cfg->G * sizeof(num_t), hipMemcpyDeviceToHost));
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[g]);
fprintf(fp, " ");
}
CUDA_CALL(hipMemcpy(tmpv, host_a->eta, cfg->G * sizeof(num_t), hipMemcpyDeviceToHost));
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[g]);
fprintf(fp, " ");
}
CUDA_CALL(hipMemcpy(tmpv, host_a->eps, cfg->N * cfg->G * sizeof(num_t), hipMemcpyDeviceToHost));
for(n = 0; n < cfg->N; ++n)
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[iG(n, g)]);
fprintf(fp, " ");
}
fprintf(fp, "\n");
fclose(fp);
free(tmpv);
}
}
__host__ void printTime(Chain *host_a, Chain *dev_a, Config *cfg){
char file[BUF];
FILE *fp;
if(cfg->time){
sprintf(file, "time-chain%d.txt", cfg->chainNum);
fp = fopen(file, "a");
if(fp == NULL){
fprintf(stderr, "ERROR: unable to append to file, %s\n", file);
return;
}
fprintf(fp, NUM_TF, cfg->timeC); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeTau); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timePiAlp); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timePiDel); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeD); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeThePhi); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeTheAlp); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeTheDel); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeSigC); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeSigPhi); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeSigAlp); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeSigDel); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeEta); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeEps); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timePhi); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeAlp); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeDel); fprintf(fp, " ");
fprintf(fp, "\n");
fclose(fp);
}
}
__host__ void interimResults(Chain *host_a, Chain *dev_a, Config *cfg){
if(cfg->probs)
hipLaunchKernelGGL(( updateProbs), dim3(G_GRID), dim3(G_BLOCK), 0, 0, dev_a, cfg->heterosis);
printHyper(host_a, dev_a, cfg);
printParms(host_a, dev_a, cfg);
printTime(host_a, dev_a, cfg);
if(cfg->dic)
updateDICprep(dev_a, cfg);
++cfg->m;
hipLaunchKernelGGL(( updateM), dim3(1), dim3(1), 0, 0, dev_a);
}
| 3f0a66ccea1c6085224b2d4afdf3e12015471e3d.cu | #include <Chain.h>
#include <Config.h>
#include <constants.h>
#include <functions.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
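// after burn-in, accumulate Monte Carlo counts used for posterior probabilities:
// dex[g] counts draws with effectively nonzero alp[g] (differential expression);
// with heterosis, hph/lph/mph count high-, low-, and mid-parent heterosis events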
__global__ void updateProbs(Chain *a, int heterosis){
int g = IDX;
if(a->m > a->burnin){
if(g < a->G){
a->dex[g] += ((a->alp[g] * a->alp[g]) > 1e-6);
if(heterosis){
a->hph[g] += (a->del[g] > fabs(a->alp[g]));
a->lph[g] += (a->del[g] < -fabs(a->alp[g]));
a->mph[g] += (fabs(a->del[g]) > 1e-6);
}
}
}
}
__global__ void updateM(Chain* a){
++a->m;
}
__host__ void printHyper(Chain *host_a, Chain *dev_a, Config *cfg){
char file[BUF];
FILE *fp;
num_t tmp;
if(cfg->hyper){
sprintf(file, "hyper-chain%d.txt", cfg->chainNum);
fp = fopen(file, "a");
if(fp == NULL){
fprintf(stderr, "ERROR: unable to open file, %s\n", file);
exit(EXIT_FAILURE);
}
if(cfg->constSigC){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->sigC), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constD){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->d), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constTau){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->tau), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constThePhi){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->thePhi), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constTheAlp){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->theAlp), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constTheDel){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->theDel), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constSigPhi){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->sigPhi), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constSigAlp){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->sigAlp), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constSigDel){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->sigDel), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constPiAlp){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->piAlp), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
if(cfg->constPiDel){
fprintf(fp, ". ");
} else {
CUDA_CALL(cudaMemcpy(&(tmp), &(dev_a->piDel), sizeof(num_t), cudaMemcpyDeviceToHost));
fprintf(fp, NUM_TF, tmp); fprintf(fp, " ");
}
fprintf(fp, "\n");
fclose(fp);
}
}
__host__ void printParms(Chain *host_a, Chain *dev_a, Config *cfg){
int n, g, G = cfg->G;
char file[BUF];
FILE *fp;
num_t *tmpv;
if(cfg->parms){
sprintf(file, "parms-chain%d.txt", cfg->chainNum);
fp = fopen(file, "a");
if(fp == NULL){
fprintf(stderr, "ERROR: unable to open file, %s\n", file);
return;
}
tmpv = (num_t*) malloc(cfg->N * cfg->G * sizeof(num_t));
CUDA_CALL(cudaMemcpy(tmpv, host_a->c, cfg->N * sizeof(num_t), cudaMemcpyDeviceToHost));
for(n = 0; n < cfg->N; ++n){
fprintf(fp, NUM_TF, tmpv[n]);
fprintf(fp, " ");
}
CUDA_CALL(cudaMemcpy(tmpv, host_a->phi, cfg->G * sizeof(num_t), cudaMemcpyDeviceToHost));
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[g]);
fprintf(fp, " ");
}
CUDA_CALL(cudaMemcpy(tmpv, host_a->alp, cfg->G * sizeof(num_t), cudaMemcpyDeviceToHost));
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[g]);
fprintf(fp, " ");
}
CUDA_CALL(cudaMemcpy(tmpv, host_a->del, cfg->G * sizeof(num_t), cudaMemcpyDeviceToHost));
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[g]);
fprintf(fp, " ");
}
CUDA_CALL(cudaMemcpy(tmpv, host_a->eta, cfg->G * sizeof(num_t), cudaMemcpyDeviceToHost));
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[g]);
fprintf(fp, " ");
}
CUDA_CALL(cudaMemcpy(tmpv, host_a->eps, cfg->N * cfg->G * sizeof(num_t), cudaMemcpyDeviceToHost));
for(n = 0; n < cfg->N; ++n)
for(g = 0; g < cfg->G; ++g){
fprintf(fp, NUM_TF, tmpv[iG(n, g)]);
fprintf(fp, " ");
}
fprintf(fp, "\n");
fclose(fp);
free(tmpv);
}
}
__host__ void printTime(Chain *host_a, Chain *dev_a, Config *cfg){
char file[BUF];
FILE *fp;
if(cfg->time){
sprintf(file, "time-chain%d.txt", cfg->chainNum);
fp = fopen(file, "a");
if(fp == NULL){
fprintf(stderr, "ERROR: unable to append to file, %s\n", file);
return;
}
fprintf(fp, NUM_TF, cfg->timeC); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeTau); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timePiAlp); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timePiDel); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeD); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeThePhi); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeTheAlp); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeTheDel); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeSigC); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeSigPhi); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeSigAlp); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeSigDel); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeEta); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeEps); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timePhi); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeAlp); fprintf(fp, " ");
fprintf(fp, NUM_TF, cfg->timeDel); fprintf(fp, " ");
fprintf(fp, "\n");
fclose(fp);
}
}
__host__ void interimResults(Chain *host_a, Chain *dev_a, Config *cfg){
if(cfg->probs)
updateProbs<<<G_GRID, G_BLOCK>>>(dev_a, cfg->heterosis);
printHyper(host_a, dev_a, cfg);
printParms(host_a, dev_a, cfg);
printTime(host_a, dev_a, cfg);
if(cfg->dic)
updateDICprep(dev_a, cfg);
++cfg->m;
updateM<<<1, 1>>>(dev_a);
}
|
29e3f9514c7fc1f6bf02e8f78eb79924f293d86c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "my_cuda.h"
// Fill the particle bins arrays -- partBin and partInd
__global__ void bin_fill(int *partInd, int *partBin, int nparts,
part_struct *parts, dom_struct *binDom, BC bc)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;
int c;
int ibin, jbin, kbin;
// Find the correct bin index for each part and store it
if (pp < nparts) {
ibin = floor((parts[pp].x - binDom->xs)/binDom->dx);
jbin = floor((parts[pp].y - binDom->ys)/binDom->dy);
kbin = floor((parts[pp].z - binDom->zs)/binDom->dz);
c = ibin + jbin*binDom->Gcc.s1 + kbin*binDom->Gcc.s2;
partInd[pp] = pp; // index of particle
partBin[pp] = c; // bin index
parts[pp].bin = c; // bin index (stored in particle)
}
}
__global__ void init(int *array, int length, int val)
{
  int pp = threadIdx.x + blockIdx.x*blockDim.x;
if (pp < length) {
array[pp] = val;
}
}
__global__ void bin_start(int *binStart, int *binEnd, int *partBin, int nparts)
{
// This kernel function was adapted from NVIDIA CUDA 5.5 Examples
// This software contains source code provided by NVIDIA Corporation
extern __shared__ int sharedBin[]; //blockSize + 1
int index = threadIdx.x + blockIdx.x*blockDim.x;
int bin;
// for a given bin index, the previous bins's index is stored in sharedBin
if (index < nparts) {
bin = partBin[index];
// Load bin data into shared memory so that we can look
// at neighboring particle's hash value without loading
// two bin values per thread
sharedBin[threadIdx.x + 1] = bin;
if (index > 0 && threadIdx.x == 0) {
// first thread in block must load neighbor particle bin
sharedBin[0] = partBin[index - 1];
}
}
__syncthreads();
if (index < nparts) {
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
bin = partBin[index];
if (index == 0 || bin != sharedBin[threadIdx.x]) {
binStart[bin] = index;
if (index > 0)
binEnd[sharedBin[threadIdx.x]] = index;
}
if (index == nparts - 1)
{
binEnd[bin] = index + 1;
}
}
}
__global__ void find_nodes(part_struct *parts, int nparts, dom_struct *dom,
BC bc, int *binStart, int *binEnd, int *partBin, int *partInd,
dom_struct *binDom, int *neighborList, int *neighborCount, int orderN,
int orderL)
{
int index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < nparts) {
int i = partInd[index];
int bin = partBin[index];
int kbin = floorf(bin/binDom->Gcc.s2);
int jbin = floorf((bin - kbin*binDom->Gcc.s2)/binDom->Gcc.s1);
int ibin = bin - kbin*binDom->Gcc.s2 - jbin*binDom->Gcc.s1;
int l, m, n; // adjacent bin iterators
int target, j; // target indices
int adjBin, adjStart, adjEnd; // adjacent bin stuff
int iStride, kStride, jStride; // how to get to Sesame Street
// predefine face locations
// -1, -2 due to local vs global indexing and defiinition of dom_struct
int fW = binDom->Gcc.is - 1;
int fE = binDom->Gcc.ie - 2;
int fS = binDom->Gcc.js - 1;
int fN = binDom->Gcc.je - 2;
int fB = binDom->Gcc.ks - 1;
int fT = binDom->Gcc.ke - 2;
// size checks
int xnBin = (binDom->xn > 2);
int ynBin = (binDom->yn > 2);
int znBin = (binDom->zn > 2);
// particle pair variables
double xi, yi, zi;
double xj, yj, zj;
double rx1, ry1, rz1;
double rx2, ry2, rz2;
double rx, ry, rz;
double r_ij, r2_ij, mu_ij;
// loop over adjacent bins and take care of periodic conditions
for (n = -1; n <= 1; n++) {
// if on a face and not periodic, continue
// if on a face and periodic but only 2 bins, continue
if ((n == -1 && kbin == fB && bc.pB != PERIODIC) ||
(n == 1 && kbin == fT && bc.pT != PERIODIC) ||
(n == -1 && kbin == fB && bc.pB == PERIODIC && znBin == 0) ||
(n == 1 && kbin == fT && bc.pT == PERIODIC && znBin == 0)) {
continue;
// if on a face and periodic, flip to other side
} else if (n == -1 && kbin == fB && bc.pB == PERIODIC) {
kStride = fT*binDom->Gcc.s2;
} else if (n == 1 && kbin == fT && bc.pT == PERIODIC) {
kStride = fB*binDom->Gcc.s2;
// else, we are in the middle, do nothing special
} else {
kStride = (kbin + n)*binDom->Gcc.s2;
}
for (m = -1; m <= 1; m++) {
if ((m == -1 && jbin == fS && bc.pS != PERIODIC) ||
(m == 1 && jbin == fN && bc.pN != PERIODIC) ||
(m == -1 && jbin == fS && bc.pS == PERIODIC && ynBin == 0) ||
(m == 1 && jbin == fN && bc.pN == PERIODIC && ynBin == 0)) {
continue;
} else if (m == -1 && jbin == fS && bc.pS == PERIODIC) {
jStride = fN*binDom->Gcc.s1;
} else if (m == 1 && jbin == fN && bc.pN == PERIODIC) {
jStride = fS*binDom->Gcc.s1;
} else {
jStride = (jbin + m)*binDom->Gcc.s1;
}
for (l = -1; l <= 1; l++) {
if ((l == -1 && ibin == fW && bc.pW != PERIODIC) ||
(l == 1 && ibin == fE && bc.pE != PERIODIC) ||
(l == -1 && ibin == fW && bc.pW == PERIODIC && xnBin == 0) ||
(l == 1 && ibin == fE && bc.pE == PERIODIC && xnBin == 0)) {
continue;
} else if (l == -1 && ibin == fW && bc.pW == PERIODIC) {
iStride = fE;
} else if (l == 1 && ibin == fE && bc.pE == PERIODIC) {
iStride = fW;
} else {
iStride = ibin + l;
}
adjBin = iStride + jStride + kStride;
adjStart = binStart[adjBin]; // find start and end of bins
adjEnd = binEnd[adjBin];
if (adjStart != -1) { // if bin is not empty
for (target = adjStart; target < adjEnd; target++) {
j = partInd[target];
if (j != i) { // if its not original part
/* Find part separation, check for periodic neighbors */
// X
xi = parts[i].x;
xj = parts[j].x;
rx = xi - xj;
// check and correct for separation
rx1 = xi - (xj + dom->xl);
rx2 = xi - (xj - dom->xl);
rx = rx1*(rx1*rx1 < rx*rx) + rx2*(rx2*rx2 < rx*rx);
rx = (bc.pW == PERIODIC)*rx + (bc.pW != PERIODIC)*(xi - xj);
// Y
yi = parts[i].y;
yj = parts[j].y;
// check and correct for separation
ry1 = yi - (yj + dom->yl);
ry2 = yi - (yj - dom->yl);
ry = ry1*(ry1*ry1 < ry*ry) + ry2*(ry2*ry2 < ry*ry);
ry = (bc.pS == PERIODIC)*ry + (bc.pS != PERIODIC)*(yi - yj);
// Z
zi = parts[i].z;
zj = parts[j].z;
rz = zi - zj;
// check and correct for separation
rz1 = zi - (zj + dom->zl);
rz2 = zi - (zj - dom->zl);
rz = rz1*(rz1*rz1 < rz*rz) + rz2*(rz2*rz2 < rz*rz);
rz = (bc.pB == PERIODIC)*rz + (bc.pB != PERIODIC)*(zi - zj);
// corrected separation
r2_ij = rx*rx + ry*ry + rz*rz;
r_ij = sqrt(r2_ij);
// angle mu = cos(th) = z/r -- TODO: symmetric over pi/2?
mu_ij = rz/r_ij;
// TODO: r > r0?
                // Loop over L, N
                // (assumption: L is the domain extent in z; PI comes from the headers)
                const double L = dom->zl;
                for (int enn = 0; enn < orderN; enn++) {
                  double kn = enn*PI/L;
                  for (int ell = 0; ell < orderL; ell++) {
                    // Calculate coefficients for g_ln
                    // Calculate P_l(mu)
                    double P_ell = eval_legendre_poly(mu_ij, ell);
                    // Calculate kernel, P_l(mu)/r^2
                    double kernel = P_ell/r2_ij;
}
}
}
}
}
}
}
}
}
}
__device__ double eval_legendre_poly(double mu_ij, int ell)
{
// from numerical recipes, page 184
  double p1 = 1.;
  double p2 = 0.;
  double p3 = 0.;
  // upward recurrence: (j+1) P_{j+1}(x) = (2j+1) x P_j(x) - j P_{j-1}(x)
  for (int j = 0; j < ell; j++) {
    p3 = p2;
    p2 = p1;
    p1 = ((2.*j + 1.)*mu_ij*p2 - j*p3)/(j + 1.);
}
// p1 has answer
return p1;
}
| 29e3f9514c7fc1f6bf02e8f78eb79924f293d86c.cu | #include "my_cuda.h"
// Fill the particle bins arrays -- partBin and partInd
__global__ void bin_fill(int *partInd, int *partBin, int nparts,
part_struct *parts, dom_struct *binDom, BC bc)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;
int c;
int ibin, jbin, kbin;
// Find the correct bin index for each part and store it
if (pp < nparts) {
ibin = floor((parts[pp].x - binDom->xs)/binDom->dx);
jbin = floor((parts[pp].y - binDom->ys)/binDom->dy);
kbin = floor((parts[pp].z - binDom->zs)/binDom->dz);
c = ibin + jbin*binDom->Gcc.s1 + kbin*binDom->Gcc.s2;
partInd[pp] = pp; // index of particle
partBin[pp] = c; // bin index
parts[pp].bin = c; // bin index (stored in particle)
}
}
__global__ void init(int *array, int length, int val)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x;;
if (pp < length) {
array[pp] = val;
}
}
__global__ void bin_start(int *binStart, int *binEnd, int *partBin, int nparts)
{
// This kernel function was adapted from NVIDIA CUDA 5.5 Examples
// This software contains source code provided by NVIDIA Corporation
extern __shared__ int sharedBin[]; //blockSize + 1
int index = threadIdx.x + blockIdx.x*blockDim.x;
int bin;
// each thread stores its particle's bin in sharedBin so the next thread can read the previous particle's bin
if (index < nparts) {
bin = partBin[index];
// Load bin data into shared memory so that we can look
// at neighboring particle's hash value without loading
// two bin values per thread
sharedBin[threadIdx.x + 1] = bin;
if (index > 0 && threadIdx.x == 0) {
// first thread in block must load neighbor particle bin
sharedBin[0] = partBin[index - 1];
}
}
__syncthreads();
if (index < nparts) {
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
bin = partBin[index];
if (index == 0 || bin != sharedBin[threadIdx.x]) {
binStart[bin] = index;
if (index > 0)
binEnd[sharedBin[threadIdx.x]] = index;
}
if (index == nparts - 1)
{
binEnd[bin] = index + 1;
}
}
}
// forward declaration so the call inside find_nodes compiles before the definition below
__device__ double eval_legendre_poly(double mu_ij, int ell);
__global__ void find_nodes(part_struct *parts, int nparts, dom_struct *dom,
BC bc, int *binStart, int *binEnd, int *partBin, int *partInd,
dom_struct *binDom, int *neighborList, int *neighborCount, int orderN,
int orderL)
{
int index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < nparts) {
int i = partInd[index];
int bin = partBin[index];
int kbin = floorf(bin/binDom->Gcc.s2);
int jbin = floorf((bin - kbin*binDom->Gcc.s2)/binDom->Gcc.s1);
int ibin = bin - kbin*binDom->Gcc.s2 - jbin*binDom->Gcc.s1;
int l, m, n; // adjacent bin iterators
int target, j; // target indices
int adjBin, adjStart, adjEnd; // adjacent bin stuff
int iStride, kStride, jStride; // how to get to Sesame Street
// predefine face locations
// -1, -2 due to local vs global indexing and definition of dom_struct
int fW = binDom->Gcc.is - 1;
int fE = binDom->Gcc.ie - 2;
int fS = binDom->Gcc.js - 1;
int fN = binDom->Gcc.je - 2;
int fB = binDom->Gcc.ks - 1;
int fT = binDom->Gcc.ke - 2;
// size checks
int xnBin = (binDom->xn > 2);
int ynBin = (binDom->yn > 2);
int znBin = (binDom->zn > 2);
// particle pair variables
double xi, yi, zi;
double xj, yj, zj;
double rx1, ry1, rz1;
double rx2, ry2, rz2;
double rx, ry, rz;
double r_ij, r2_ij, mu_ij;
// loop over adjacent bins and take care of periodic conditions
for (n = -1; n <= 1; n++) {
// if on a face and not periodic, continue
// if on a face and periodic but only 2 bins, continue
if ((n == -1 && kbin == fB && bc.pB != PERIODIC) ||
(n == 1 && kbin == fT && bc.pT != PERIODIC) ||
(n == -1 && kbin == fB && bc.pB == PERIODIC && znBin == 0) ||
(n == 1 && kbin == fT && bc.pT == PERIODIC && znBin == 0)) {
continue;
// if on a face and periodic, flip to other side
} else if (n == -1 && kbin == fB && bc.pB == PERIODIC) {
kStride = fT*binDom->Gcc.s2;
} else if (n == 1 && kbin == fT && bc.pT == PERIODIC) {
kStride = fB*binDom->Gcc.s2;
// else, we are in the middle, do nothing special
} else {
kStride = (kbin + n)*binDom->Gcc.s2;
}
for (m = -1; m <= 1; m++) {
if ((m == -1 && jbin == fS && bc.pS != PERIODIC) ||
(m == 1 && jbin == fN && bc.pN != PERIODIC) ||
(m == -1 && jbin == fS && bc.pS == PERIODIC && ynBin == 0) ||
(m == 1 && jbin == fN && bc.pN == PERIODIC && ynBin == 0)) {
continue;
} else if (m == -1 && jbin == fS && bc.pS == PERIODIC) {
jStride = fN*binDom->Gcc.s1;
} else if (m == 1 && jbin == fN && bc.pN == PERIODIC) {
jStride = fS*binDom->Gcc.s1;
} else {
jStride = (jbin + m)*binDom->Gcc.s1;
}
for (l = -1; l <= 1; l++) {
if ((l == -1 && ibin == fW && bc.pW != PERIODIC) ||
(l == 1 && ibin == fE && bc.pE != PERIODIC) ||
(l == -1 && ibin == fW && bc.pW == PERIODIC && xnBin == 0) ||
(l == 1 && ibin == fE && bc.pE == PERIODIC && xnBin == 0)) {
continue;
} else if (l == -1 && ibin == fW && bc.pW == PERIODIC) {
iStride = fE;
} else if (l == 1 && ibin == fE && bc.pE == PERIODIC) {
iStride = fW;
} else {
iStride = ibin + l;
}
adjBin = iStride + jStride + kStride;
adjStart = binStart[adjBin]; // find start and end of bins
adjEnd = binEnd[adjBin];
if (adjStart != -1) { // if bin is not empty
for (target = adjStart; target < adjEnd; target++) {
j = partInd[target];
if (j != i) { // if its not original part
/* Find part separation, check for periodic neighbors */
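// Each component below uses a branchless minimum-image pick: compare the raw
// separation against the two periodically shifted ones (+/- the domain
// length), keep whichever has the smallest magnitude, and fall back to the
// raw separation when that direction is not periodic.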
// X
xi = parts[i].x;
xj = parts[j].x;
rx = xi - xj;
// check and correct for separation
rx1 = xi - (xj + dom->xl);
rx2 = xi - (xj - dom->xl);
rx = rx1*(rx1*rx1 < rx*rx) + rx2*(rx2*rx2 < rx*rx) + rx*((rx*rx <= rx1*rx1) && (rx*rx <= rx2*rx2));
rx = (bc.pW == PERIODIC)*rx + (bc.pW != PERIODIC)*(xi - xj);
// Y
yi = parts[i].y;
yj = parts[j].y;
ry = yi - yj;
// check and correct for separation
ry1 = yi - (yj + dom->yl);
ry2 = yi - (yj - dom->yl);
ry = ry1*(ry1*ry1 < ry*ry) + ry2*(ry2*ry2 < ry*ry) + ry*((ry*ry <= ry1*ry1) && (ry*ry <= ry2*ry2));
ry = (bc.pS == PERIODIC)*ry + (bc.pS != PERIODIC)*(yi - yj);
// Z
zi = parts[i].z;
zj = parts[j].z;
rz = zi - zj;
// check and correct for separation
rz1 = zi - (zj + dom->zl);
rz2 = zi - (zj - dom->zl);
rz = rz1*(rz1*rz1 < rz*rz) + rz2*(rz2*rz2 < rz*rz) + rz*((rz*rz <= rz1*rz1) && (rz*rz <= rz2*rz2));
rz = (bc.pB == PERIODIC)*rz + (bc.pB != PERIODIC)*(zi - zj);
// corrected separation
r2_ij = rx*rx + ry*ry + rz*rz;
r_ij = sqrt(r2_ij);
// angle mu = cos(th) = z/r -- TODO: symmetric over pi/2?
mu_ij = rz/r_ij;
// TODO: r > r0?
// Loop over L, N
for (int enn = 0; enn < orderN; enn++) {
// axial wavenumber for mode enn -- PI and L are assumed to be defined in
// the included headers (L is presumably the relevant domain length)
double kn = enn*PI/L;
for (int ell = 0; ell < orderL; ell++) {
// Calculate coefficients for g_ln
// Calculate P_l(mu)
double P_ell = eval_legendre_poly(mu_ij, ell);
// Calculate kernel, p_l(mu)/r^2
double kernel = P_ell/r2_ij;
}
}
}
}
}
}
}
}
}
}
__device__ double eval_legendre_poly(double mu_ij, int ell)
{
// from numerical recipes, page 184
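// Bonnet recurrence: (j + 1) P_{j+1}(x) = (2j + 1) x P_j(x) - j P_{j-1}(x), with P_0 = 1, P_{-1} = 0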
double p1 = 1.;
double p2 = 0.;
double p3 = 0.;
for (int j = 0; j < ell; j++) {
p3 = p2;
p2 = p1;
p1 = ((2.*j + 1.)*mu_ij*p2 - j*p3)/(j + 1.);
}
// p1 has answer
return p1;
}
|
3b54deb26e13b8cef21e4bce8f45918f8709edf3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define N 100
#define PAR_SIM_COUNT 5
#define T 1
#define T_MIN 0.1
#define ALPHA 0.9
#define ITERATION 1000000
__global__
void expandRandom(float *a, int min, int max){
int index = blockIdx.x * blockDim.x + threadIdx.x;
a[index] *= (max - min + 0.9999);
}
__device__
int expandRandom(float a, int min, int max){
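// maps a hiprand_uniform draw, which lies in (0, 1], onto an integer in
// [min, max]: a == 1 gives min, values near 0 approach max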
return (int) ((a - 1) * (min - max) / 1 + min);
}
__global__
void fill(int *a){
int index = blockIdx.x * blockDim.x + threadIdx.x;
a[index] = index%N;
}
__global__
void produceInitial(hiprandState_t* globalState,int *a){
int index = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t localState = globalState[index];
for(int i =0; i<N; i++){
int rnd1 = expandRandom(hiprand_uniform( &localState ),0,N-1);
int rnd2 = expandRandom(hiprand_uniform( &localState ),0,N-1);
int temp = a[index*N+rnd1];
a[index*N+rnd1] = a[index*N+rnd2];
a[index*N+rnd2] = temp;
}
}
__global__
void costCalc(int *a, int *costMatrix, int* calculatedCost) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int sum;
if (threadIdx.x == 0)
sum = 0;
__syncthreads();
// sum the N-1 tour edges; the bound keeps the last thread from reading a[N]
if (index < N - 1)
atomicAdd(&sum, costMatrix[a[index]*N + a[index+1]]);
__syncthreads();
if (threadIdx.x == 0)
*calculatedCost = sum;
}
__global__
void copyMatPar(int *mat1, int *mat2, int ind){
int index = blockIdx.x * blockDim.x + threadIdx.x;
mat1[N*ind + index] = mat2[index];
}
__global__
void initSolCopy(int *mat1, int *init, int ind){
int index = blockIdx.x * blockDim.x + threadIdx.x;
mat1[index] = init[N*ind + index];
}
__global__
void setup_kernel ( hiprandState_t * state, unsigned long seed )
{
int id = threadIdx.x;
hiprand_init ( seed, id, 0, &state[id] );
}
__global__
void solver( hiprandState_t* globalState, int* init_solution, int * costMatrix, int *solution, int *cost, int* initCosts)
{
int ind = blockIdx.x * blockDim.x + threadIdx.x;
//printf("Thred %d: Init cost: %d\n", ind, initCosts[ind]);
float temperature = T;
hiprandState_t localState = globalState[ind];
int *currentSol = new int[N];
int *newSol = new int[N];
hipLaunchKernelGGL(( initSolCopy), dim3(1),dim3(N), 0, 0, currentSol, init_solution, ind);
hipLaunchKernelGGL(( initSolCopy), dim3(1),dim3(N), 0, 0, newSol, init_solution, ind);
// child kernel launches are asynchronous; wait for the copies to finish
// before the loop below starts reading and swapping these buffers
hipDeviceSynchronize();
int *currentCost = new int;
*currentCost = initCosts[ind];
int * newCost = new int;
while(temperature > T_MIN){
for(int i=0; i<ITERATION/500; i++){
int rnd1 = expandRandom(hiprand_uniform( &localState ),0,N-1);
int rnd2 = expandRandom(hiprand_uniform( &localState ),0,N-1);
if(rnd1 == rnd2)
continue;
int temp = newSol[rnd1];
newSol[rnd1] = newSol[rnd2];
newSol[rnd2] = temp;
*newCost = 0;
*currentCost = 0;
hipLaunchKernelGGL(( costCalc), dim3(1),dim3(N), 0, 0, currentSol, costMatrix, currentCost);
hipLaunchKernelGGL(( costCalc), dim3(1),dim3(N), 0, 0, newSol, costMatrix, newCost);
hipDeviceSynchronize();
if(*newCost < *currentCost){
*currentCost = *newCost;
currentSol[rnd1] = newSol[rnd1];
currentSol[rnd2] = newSol[rnd2];
}
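// Metropolis criterion: accept a worse tour with probability
// exp(-(newCost - currentCost)/temperature)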
else if(hiprand_uniform( &localState ) < exp(((*currentCost- *newCost)/temperature))){
*currentCost = *newCost;
currentSol[rnd1] = newSol[rnd1];
currentSol[rnd2] = newSol[rnd2];
}
else{
newSol[rnd1] = currentSol[rnd1];
newSol[rnd2] = currentSol[rnd2];
}
}
temperature *= ALPHA;
}
hipLaunchKernelGGL(( copyMatPar), dim3(1),dim3(N), 0, 0, solution, currentSol, ind);
cost[ind] = *currentCost;
}
int main() {
struct timeval startc, end;
long seconds, useconds;
double mtime;
int *cost_matrix;
int* init_sol;
int *finalSolutions;
int *finalCosts;
hiprandState_t* devStates;
hipMalloc(&devStates, N*sizeof(hiprandState_t));
printf("Started\n");
hipMallocManaged((void **)&cost_matrix, N*N*sizeof(int));
hipMallocManaged((void **)&finalSolutions, N*N*sizeof(int));
hipMallocManaged((void **)&finalCosts, N*sizeof(int));
hipMallocManaged((void **)&init_sol, (PAR_SIM_COUNT+1)*N*sizeof(int));
srand(time(0));
for(int i = 0; i<N*N; i++){
cost_matrix[i] = rand()%100;
}
/*for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
printf("%f\t",cost_matrix[i*N+j]);
}
printf("\n");
} */
hipLaunchKernelGGL(( fill), dim3(PAR_SIM_COUNT+1),dim3(N), 0, 0, init_sol);
hipLaunchKernelGGL(( setup_kernel) , dim3(1), dim3(N), 0, 0, devStates, time(NULL));
hipDeviceSynchronize();
hipLaunchKernelGGL(( produceInitial), dim3(1),dim3(PAR_SIM_COUNT+1), 0, 0, devStates,init_sol);
int *init_costs;
hipMallocManaged((void **)&init_costs, N*sizeof(int));
for(int i = 0; i<PAR_SIM_COUNT+1; i++)
hipLaunchKernelGGL(( costCalc), dim3(1),dim3(N), 0, 0, &init_sol[i*N], cost_matrix, &init_costs[i]);
hipDeviceSynchronize();
gettimeofday(&startc, NULL);
hipLaunchKernelGGL(( solver), dim3(1), dim3(PAR_SIM_COUNT), 0, 0, devStates, init_sol, cost_matrix, finalSolutions, finalCosts, init_costs);
hipDeviceSynchronize();
gettimeofday(&end, NULL);
seconds = end.tv_sec - startc.tv_sec;
useconds = end.tv_usec - startc.tv_usec;
mtime = useconds;
mtime/=1000;
mtime+=seconds*1000;
/*for(int i = 0; i<PAR_SIM_COUNT; i++){
printf("Init Cost %d: %d\n", i, init_costs[i]);
}*/
printf("GPU Solution: ");
printf("\nTime of GPU: %g\n", mtime);
//See all solutions and its costs
/*for(int i=0; i<PAR_SIM_COUNT; i++){
for(int j=0; j<N; j++){
printf("Array[%d,%d]\t%d\n", i, j, finalSolutions[N*i+j]);
}
printf("---------------\n");
}
for(int i=0; i<PAR_SIM_COUNT; i++){
printf("cost[%d]: %d\n", i, finalCosts[i]);
}*/
int minCost = finalCosts[0];
//int minCostIndex = 0;
for(int i=1; i<PAR_SIM_COUNT; i++){
if(minCost > finalCosts[i]){
minCost = finalCosts[i];
//minCostIndex = i;
}
}
/*for(int i = 0; i<N; i++){
printf("%d -> ", finalSolutions[N*minCostIndex + i]);
}*/
printf("GPU Cost: %d\n\n",minCost);
//CPU test
{
srand(time(0));
int currentSol[N];
int newSol[N];
gettimeofday(&startc, NULL);
float temperature = T;
float alpha = ALPHA;
float t_min = T_MIN;
int currentCost = init_costs[PAR_SIM_COUNT];
for(int i = 0; i<N; i++){
currentSol[i] = init_sol[PAR_SIM_COUNT*N+i];
newSol[i] = init_sol[PAR_SIM_COUNT*N+i];
}
int newCost = 0;
printf("\nCPU Init: %d\n", currentCost);
while(temperature > t_min){
for(int i=0; i<ITERATION; i++){
int rnd1 = rand()%N;
int rnd2 = rand()%N;
if(rnd1 == rnd2)
continue;
int temp = newSol[rnd1];
newSol[rnd1] = newSol[rnd2];
newSol[rnd2] = temp;
currentCost =0;
newCost =0;
for(int i= 0; i<N-1; i++){
currentCost += cost_matrix[currentSol[i]*N+currentSol[i+1]];
newCost += cost_matrix[newSol[i]*N+newSol[i+1]];
}
if(newCost < currentCost){
currentCost = newCost;
currentSol[rnd1] = newSol[rnd1];
currentSol[rnd2] = newSol[rnd2];
}
else if(((double)rand() / (double)RAND_MAX)< exp(((currentCost -newCost )/temperature))){
currentCost = newCost;
currentSol[rnd1] = newSol[rnd1];
currentSol[rnd2] = newSol[rnd2];
}
else{
newSol[rnd1] = currentSol[rnd1];
newSol[rnd2] = currentSol[rnd2];
}
}
temperature *= alpha;
}
printf("CPU Solution: ");
/*for(int i = 0; i<N; i++){
printf("%d -> ", currentSol[i]);
}*/
gettimeofday(&end, NULL);
seconds = end.tv_sec - startc.tv_sec;
useconds = end.tv_usec - startc.tv_usec;
mtime = useconds;
mtime/=1000;
mtime+=seconds*1000;
printf("\nTime of CPU: %g\n", mtime);
printf("CPU Cost: %d", currentCost);
}
printf("\n");
return 0;
}
| 3b54deb26e13b8cef21e4bce8f45918f8709edf3.cu | #include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda.h>
#include <math.h>
#define N 100
#define PAR_SIM_COUNT 5
#define T 1
#define T_MIN 0.1
#define ALPHA 0.9
#define ITERATION 1000000
__global__
void expandRandom(float *a, int min, int max){
int index = blockIdx.x * blockDim.x + threadIdx.x;
a[index] *= (max - min + 0.9999);
}
__device__
int expandRandom(float a, int min, int max){
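// maps a curand_uniform draw, which lies in (0, 1], onto an integer in
// [min, max]: a == 1 gives min, values near 0 approach max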
return (int) ((a - 1) * (min - max) / 1 + min);
}
__global__
void fill(int *a){
int index = blockIdx.x * blockDim.x + threadIdx.x;
a[index] = index%N;
}
__global__
void produceInitial(curandState* globalState,int *a){
int index = blockIdx.x * blockDim.x + threadIdx.x;
curandState localState = globalState[index];
for(int i =0; i<N; i++){
int rnd1 = expandRandom(curand_uniform( &localState ),0,N-1);
int rnd2 = expandRandom(curand_uniform( &localState ),0,N-1);
int temp = a[index*N+rnd1];
a[index*N+rnd1] = a[index*N+rnd2];
a[index*N+rnd2] = temp;
}
}
__global__
void costCalc(int *a, int *costMatrix, int* calculatedCost) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int sum;
if (threadIdx.x == 0)
sum = 0;
__syncthreads();
// sum the N-1 tour edges; the bound keeps the last thread from reading a[N]
if (index < N - 1)
atomicAdd(&sum, costMatrix[a[index]*N + a[index+1]]);
__syncthreads();
if (threadIdx.x == 0)
*calculatedCost = sum;
}
__global__
void copyMatPar(int *mat1, int *mat2, int ind){
int index = blockIdx.x * blockDim.x + threadIdx.x;
mat1[N*ind + index] = mat2[index];
}
__global__
void initSolCopy(int *mat1, int *init, int ind){
int index = blockIdx.x * blockDim.x + threadIdx.x;
mat1[index] = init[N*ind + index];
}
__global__
void setup_kernel ( curandState * state, unsigned long seed )
{
int id = threadIdx.x;
curand_init ( seed, id, 0, &state[id] );
}
__global__
void solver( curandState* globalState, int* init_solution, int * costMatrix, int *solution, int *cost, int* initCosts)
{
int ind = blockIdx.x * blockDim.x + threadIdx.x;
//printf("Thred %d: Init cost: %d\n", ind, initCosts[ind]);
float temperature = T;
curandState localState = globalState[ind];
int *currentSol = new int[N];
int *newSol = new int[N];
initSolCopy<<<1,N>>>(currentSol, init_solution, ind);
initSolCopy<<<1,N>>>(newSol, init_solution, ind);
// child kernel launches are asynchronous; wait for the copies to finish
// before the loop below starts reading and swapping these buffers
cudaDeviceSynchronize();
int *currentCost = new int;
*currentCost = initCosts[ind];
int * newCost = new int;
while(temperature > T_MIN){
for(int i=0; i<ITERATION/500; i++){
int rnd1 = expandRandom(curand_uniform( &localState ),0,N-1);
int rnd2 = expandRandom(curand_uniform( &localState ),0,N-1);
if(rnd1 == rnd2)
continue;
int temp = newSol[rnd1];
newSol[rnd1] = newSol[rnd2];
newSol[rnd2] = temp;
*newCost = 0;
*currentCost = 0;
costCalc<<<1,N>>>(currentSol, costMatrix, currentCost);
costCalc<<<1,N>>>(newSol, costMatrix, newCost);
cudaDeviceSynchronize();
if(*newCost < *currentCost){
*currentCost = *newCost;
currentSol[rnd1] = newSol[rnd1];
currentSol[rnd2] = newSol[rnd2];
}
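// Metropolis criterion: accept a worse tour with probability
// exp(-(newCost - currentCost)/temperature)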
else if(curand_uniform( &localState ) < exp(((*currentCost- *newCost)/temperature))){
*currentCost = *newCost;
currentSol[rnd1] = newSol[rnd1];
currentSol[rnd2] = newSol[rnd2];
}
else{
newSol[rnd1] = currentSol[rnd1];
newSol[rnd2] = currentSol[rnd2];
}
}
temperature *= ALPHA;
}
copyMatPar<<<1,N>>>(solution, currentSol, ind);
cost[ind] = *currentCost;
}
int main() {
struct timeval startc, end;
long seconds, useconds;
double mtime;
int *cost_matrix;
int* init_sol;
int *finalSolutions;
int *finalCosts;
curandState* devStates;
cudaMalloc(&devStates, N*sizeof(curandState));
printf("Started\n");
cudaMallocManaged((void **)&cost_matrix, N*N*sizeof(int));
cudaMallocManaged((void **)&finalSolutions, N*N*sizeof(int));
cudaMallocManaged((void **)&finalCosts, N*sizeof(int));
cudaMallocManaged((void **)&init_sol, (PAR_SIM_COUNT+1)*N*sizeof(int));
srand(time(0));
for(int i = 0; i<N*N; i++){
cost_matrix[i] = rand()%100;
}
/*for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
printf("%f\t",cost_matrix[i*N+j]);
}
printf("\n");
} */
fill<<<PAR_SIM_COUNT+1,N>>>(init_sol);
setup_kernel <<<1, N>>>(devStates, time(NULL));
cudaDeviceSynchronize();
produceInitial<<<1,PAR_SIM_COUNT+1>>>(devStates,init_sol);
int *init_costs;
cudaMallocManaged((void **)&init_costs, N*sizeof(int));
for(int i = 0; i<PAR_SIM_COUNT+1; i++)
costCalc<<<1,N>>>(&init_sol[i*N], cost_matrix, &init_costs[i]);
cudaDeviceSynchronize();
gettimeofday(&startc, NULL);
solver<<<1, PAR_SIM_COUNT>>> (devStates, init_sol, cost_matrix, finalSolutions, finalCosts, init_costs);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
seconds = end.tv_sec - startc.tv_sec;
useconds = end.tv_usec - startc.tv_usec;
mtime = useconds;
mtime/=1000;
mtime+=seconds*1000;
/*for(int i = 0; i<PAR_SIM_COUNT; i++){
printf("Init Cost %d: %d\n", i, init_costs[i]);
}*/
printf("GPU Solution: ");
printf("\nTime of GPU: %g\n", mtime);
//See all solutions and its costs
/*for(int i=0; i<PAR_SIM_COUNT; i++){
for(int j=0; j<N; j++){
printf("Array[%d,%d]\t%d\n", i, j, finalSolutions[N*i+j]);
}
printf("---------------\n");
}
for(int i=0; i<PAR_SIM_COUNT; i++){
printf("cost[%d]: %d\n", i, finalCosts[i]);
}*/
int minCost = finalCosts[0];
//int minCostIndex = 0;
for(int i=1; i<PAR_SIM_COUNT; i++){
if(minCost > finalCosts[i]){
minCost = finalCosts[i];
//minCostIndex = i;
}
}
/*for(int i = 0; i<N; i++){
printf("%d -> ", finalSolutions[N*minCostIndex + i]);
}*/
printf("GPU Cost: %d\n\n",minCost);
//CPU test
{
srand(time(0));
int currentSol[N];
int newSol[N];
gettimeofday(&startc, NULL);
float temperature = T;
float alpha = ALPHA;
float t_min = T_MIN;
int currentCost = init_costs[PAR_SIM_COUNT];
for(int i = 0; i<N; i++){
currentSol[i] = init_sol[PAR_SIM_COUNT*N+i];
newSol[i] = init_sol[PAR_SIM_COUNT*N+i];
}
int newCost = 0;
printf("\nCPU Init: %d\n", currentCost);
while(temperature > t_min){
for(int i=0; i<ITERATION; i++){
int rnd1 = rand()%N;
int rnd2 = rand()%N;
if(rnd1 == rnd2)
continue;
int temp = newSol[rnd1];
newSol[rnd1] = newSol[rnd2];
newSol[rnd2] = temp;
currentCost =0;
newCost =0;
for(int i= 0; i<N-1; i++){
currentCost += cost_matrix[currentSol[i]*N+currentSol[i+1]];
newCost += cost_matrix[newSol[i]*N+newSol[i+1]];
}
if(newCost < currentCost){
currentCost = newCost;
currentSol[rnd1] = newSol[rnd1];
currentSol[rnd2] = newSol[rnd2];
}
else if(((double)rand() / (double)RAND_MAX)< exp(((currentCost -newCost )/temperature))){
currentCost = newCost;
currentSol[rnd1] = newSol[rnd1];
currentSol[rnd2] = newSol[rnd2];
}
else{
newSol[rnd1] = currentSol[rnd1];
newSol[rnd2] = currentSol[rnd2];
}
}
temperature *= alpha;
}
printf("CPU Solution: ");
/*for(int i = 0; i<N; i++){
printf("%d -> ", currentSol[i]);
}*/
gettimeofday(&end, NULL);
seconds = end.tv_sec - startc.tv_sec;
useconds = end.tv_usec - startc.tv_usec;
mtime = useconds;
mtime/=1000;
mtime+=seconds*1000;
printf("\nTime of CPU: %g\n", mtime);
printf("CPU Cost: %d", currentCost);
}
printf("\n");
return 0;
}
|
215d626ff5f13b17498153072e0a2d09af4ed045.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zherk_fermi_batched.cu normal z -> c, Sat Nov 15 19:53:59 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
@author Azzam Haidar
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
herk_stencil.cuh defines the GPU kernel (device function).
herk_kernel_batched.cuh defines the GPU kernel (global function).
The batched version uses herk_kernel_batched.cuh instead of herk_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cgemm_fermi_kernels_batched.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CHERK performs one of the hermitian rank k operations
C := alpha*A*A**H + beta*C,
or
C := alpha*A**H*A + beta*C,
where alpha and beta are real scalars, C is an n by n hermitian
matrix and A is an n by k matrix in the first case and a k by n
matrix in the second case.
Parameters
----------
@param[in]
uplo CHARACTER*1.
On entry, uplo specifies whether the upper or lower
triangular part of the array C is to be referenced as
follows:
uplo = 'U' or 'u' Only the upper triangular part of C
is to be referenced.
uplo = 'L' or 'l' Only the lower triangular part of C
is to be referenced.
@param[in]
trans CHARACTER*1.
On entry, trans specifies the operation to be performed as
follows:
trans = 'N' or 'n' C := alpha*A*A**H + beta*C.
trans = 'C' or 'c' C := alpha*A**H*A + beta*C.
@param[in]
n INTEGER.
On entry, specifies the order of the matrix C. N must be
at least zero.
@param[in]
k INTEGER.
On entry with trans = 'N' or 'n', k specifies the number
of columns of the matrix A, and on entry with
trans = 'C' or 'c', k specifies the number of rows of the
matrix A. K must be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( ldda, ka ), where ka is
k when trans = MagmaNoTrans, and is n otherwise.
Before entry with trans = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, ldda specifies the first dimension of A as declared
in the calling (sub) program. When trans = MagmaNoTrans then
ldda must be at least max( 1, n ), otherwise ldda must be at
least max( 1, k ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC COMPLEX array of DIMENSION ( lddc, n ).
Before entry with uplo = 'U' or 'u', the leading n by n
upper triangular part of the array C must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of C is not referenced. On exit, the
upper triangular part of the array C is overwritten by the
upper triangular part of the updated matrix.
Before entry with uplo = 'L' or 'l', the leading n by n
lower triangular part of the array C must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of C is not referenced. On exit, the
lower triangular part of the array C is overwritten by the
lower triangular part of the updated matrix.
Note that the imaginary parts of the diagonal elements need
not be set, they are assumed to be zero, and on exit they
are set to zero.
@param[in]
lddc INTEGER.
On entry, lddc specifies the first dimension of dC as declared
in the calling (sub) program. lddc must be at least
max( 1, m ).
@ingroup magma_cblas3
********************************************************************/
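/*
    Illustrative call sketch (not part of the original source): assuming the
    caller has filled dA_array and dC_array with batchCount device pointers,
    a lower-triangular update C_i := alpha*A_i*A_i**H + beta*C_i for every i
    in the batch could be issued as

        magmablas_cherk_batched_lg( MagmaLower, MagmaNoTrans, n, k,
                                    alpha, dA_array, ldda,
                                    beta,  dC_array, lddc, batchCount );
*/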
extern "C" void
magmablas_cherk_batched_lg(
magma_uplo_t uplo, magma_trans_t trans, magma_int_t n, magma_int_t k,
float alpha,
magmaFloatComplex const * const * dA_array, magma_int_t ldda,
float beta,
magmaFloatComplex **dC_array, magma_int_t lddc, magma_int_t batchCount )
{
magmaFloatComplex cbeta = MAGMA_C_MAKE( beta, 0. );
magmaFloatComplex calpha = MAGMA_C_MAKE( alpha, 0. );
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower )
info = -1;
else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 )
info = -4;
else if ( trans == MagmaNoTrans ? ldda < n : ldda < k )
info = -7;
else if ( lddc < n )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("not supported \n"); // TODO call cublas
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
int TransA = 0, TransB = 0, uploA = 0;
if ( uplo == MagmaLower )
uploA = 1;
else if ( uplo == MagmaUpper )
uploA = 2;
if ( trans == MagmaNoTrans )
#if defined(PRECISION_z) || defined(PRECISION_c)
TransB = 2;
#else
TransB = 1;
#endif
else if ( trans == MagmaTrans || trans == MagmaConjTrans)
#if defined(PRECISION_z) || defined(PRECISION_c)
TransA = 2;
#else
TransA = 1;
#endif
#ifdef TEXTURE_1D
size_t sizeA = (size_t) ldda * (size_t) (!TransA ? k : n);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE )
{
printf("not supported \n"); // TODO call cublas
return;
}
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, dA_array[0], sizeA*sizeof(magmaFloatComplex));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(magmaFloatComplex);
if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (n - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_c_herk_kernel_fermi_nt_batched), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (n - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_c_herk_kernel_fermi_nc_batched), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_c_herk_kernel_fermi_tn_batched), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_c_herk_kernel_fermi_cn_batched), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
#ifdef TEXTURE_1D
hipUnbindTexture( tex_ref_A );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| 215d626ff5f13b17498153072e0a2d09af4ed045.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zherk_fermi_batched.cu normal z -> c, Sat Nov 15 19:53:59 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
@author Azzam Haidar
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
herk_stencil.cuh defines the GPU kernel (device function).
herk_kernel_batched.cuh defines the GPU kernel (global function).
The batched version uses herk_kernel_batched.cuh instead of herk_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cgemm_fermi_kernels_batched.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
CHERK performs one of the hermitian rank k operations
C := alpha*A*A**H + beta*C,
or
C := alpha*A**H*A + beta*C,
where alpha and beta are real scalars, C is an n by n hermitian
matrix and A is an n by k matrix in the first case and a k by n
matrix in the second case.
Parameters
----------
@param[in]
uplo CHARACTER*1.
On entry, uplo specifies whether the upper or lower
triangular part of the array C is to be referenced as
follows:
uplo = 'U' or 'u' Only the upper triangular part of C
is to be referenced.
uplo = 'L' or 'l' Only the lower triangular part of C
is to be referenced.
@param[in]
trans CHARACTER*1.
On entry, trans specifies the operation to be performed as
follows:
trans = 'N' or 'n' C := alpha*A*A**H + beta*C.
trans = 'C' or 'c' C := alpha*A**H*A + beta*C.
@param[in]
n INTEGER.
On entry, specifies the order of the matrix C. N must be
at least zero.
@param[in]
k INTEGER.
On entry with trans = 'N' or 'n', k specifies the number
of columns of the matrix A, and on entry with
trans = 'C' or 'c', k specifies the number of rows of the
matrix A. K must be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( ldda, ka ), where ka is
k when trans = MagmaNoTrans, and is n otherwise.
Before entry with trans = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, ldda specifies the first dimension of A as declared
in the calling (sub) program. When trans = MagmaNoTrans then
ldda must be at least max( 1, n ), otherwise ldda must be at
least max( 1, k ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC COMPLEX array of DIMENSION ( lddc, n ).
Before entry with uplo = 'U' or 'u', the leading n by n
upper triangular part of the array C must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of C is not referenced. On exit, the
upper triangular part of the array C is overwritten by the
upper triangular part of the updated matrix.
Before entry with uplo = 'L' or 'l', the leading n by n
lower triangular part of the array C must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of C is not referenced. On exit, the
lower triangular part of the array C is overwritten by the
lower triangular part of the updated matrix.
Note that the imaginary parts of the diagonal elements need
not be set, they are assumed to be zero, and on exit they
are set to zero.
@param[in]
lddc INTEGER.
On entry, lddc specifies the first dimension of dC as declared
in the calling (sub) program. lddc must be at least
max( 1, m ).
@ingroup magma_cblas3
********************************************************************/
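/*
    Illustrative call sketch (not part of the original source): assuming the
    caller has filled dA_array and dC_array with batchCount device pointers,
    a lower-triangular update C_i := alpha*A_i*A_i**H + beta*C_i for every i
    in the batch could be issued as

        magmablas_cherk_batched_lg( MagmaLower, MagmaNoTrans, n, k,
                                    alpha, dA_array, ldda,
                                    beta,  dC_array, lddc, batchCount );
*/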
extern "C" void
magmablas_cherk_batched_lg(
magma_uplo_t uplo, magma_trans_t trans, magma_int_t n, magma_int_t k,
float alpha,
magmaFloatComplex const * const * dA_array, magma_int_t ldda,
float beta,
magmaFloatComplex **dC_array, magma_int_t lddc, magma_int_t batchCount )
{
magmaFloatComplex cbeta = MAGMA_C_MAKE( beta, 0. );
magmaFloatComplex calpha = MAGMA_C_MAKE( alpha, 0. );
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower )
info = -1;
else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 )
info = -4;
else if ( trans == MagmaNoTrans ? ldda < n : ldda < k )
info = -7;
else if ( lddc < n )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("not supported \n"); // TODO call cublas
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
int TransA = 0, TransB = 0, uploA = 0;
if ( uplo == MagmaLower )
uploA = 1;
else if ( uplo == MagmaUpper )
uploA = 2;
if ( trans == MagmaNoTrans )
#if defined(PRECISION_z) || defined(PRECISION_c)
TransB = 2;
#else
TransB = 1;
#endif
else if ( trans == MagmaTrans || trans == MagmaConjTrans)
#if defined(PRECISION_z) || defined(PRECISION_c)
TransA = 2;
#else
TransA = 1;
#endif
#ifdef TEXTURE_1D
size_t sizeA = (size_t) ldda * (size_t) (!TransA ? k : n);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE )
{
printf("not supported \n"); // TODO call cublas
return;
}
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = cudaFilterModePoint;
tex_ref_A.addressMode[0] = cudaAddressModeClamp;
// Bind A and B to texture references
cudaError_t err;
err = cudaBindTexture(&offsetA, tex_ref_A, dA_array[0], sizeA*sizeof(magmaFloatComplex));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(magmaFloatComplex);
if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (n - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 ,
batchCount );
magmablas_c_herk_kernel_fermi_nt_batched<<< dimGrid, dimBlock, 0, magma_stream >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (n - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 ,
batchCount );
magmablas_c_herk_kernel_fermi_nc_batched<<< dimGrid, dimBlock, 0, magma_stream >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 ,
batchCount );
magmablas_c_herk_kernel_fermi_tn_batched<<< dimGrid, dimBlock, 0, magma_stream >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 ,
batchCount );
magmablas_c_herk_kernel_fermi_cn_batched<<< dimGrid, dimBlock, 0, magma_stream >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
#ifdef TEXTURE_1D
cudaUnbindTexture( tex_ref_A );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
9917f4afd6035855b56984f6750f2c027d909d49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <numeric>
#include <raft/spatial/knn/knn.hpp>
#include <utilities/high_res_timer.hpp>
#include <rmm/cuda_stream_view.hpp>
#include "tsp.hpp"
#include "tsp_solver.hpp"
namespace cugraph {
namespace detail {
TSP::TSP(raft::handle_t const& handle,
int const* vtx_ptr,
float const* x_pos,
float const* y_pos,
int nodes,
int restarts,
bool beam_search,
int k,
int nstart,
bool verbose,
int* route)
: handle_(handle),
vtx_ptr_(vtx_ptr),
x_pos_(x_pos),
y_pos_(y_pos),
nodes_(nodes),
restarts_(restarts),
beam_search_(beam_search),
k_(k),
nstart_(nstart),
verbose_(verbose),
route_(route),
stream_(handle_.get_stream()),
max_blocks_(handle_.get_device_properties().maxGridSize[0]),
max_threads_(handle_.get_device_properties().maxThreadsPerBlock),
warp_size_(handle_.get_device_properties().warpSize),
sm_count_(handle_.get_device_properties().multiProcessorCount),
restart_batch_(8192),
mylock_scalar_(stream_),
best_cost_scalar_(stream_),
neighbors_vec_((k_ + 1) * nodes_, stream_),
work_vec_(restart_batch_ * ((4 * nodes_ + 3 + warp_size_ - 1) / warp_size_ * warp_size_),
stream_),
best_x_pos_vec_(1, stream_),
best_y_pos_vec_(1, stream_),
best_route_vec_(1, stream_)
{
setup();
}
void TSP::setup()
{
mylock_ = mylock_scalar_.data();
neighbors_ = neighbors_vec_.data();
// pre-allocate workspace for climbs, each block needs a separate permutation space and search
// buffer. We allocate a work buffer that will store the computed distances, px, py and the route.
// We align it on the warp size.
work_ = work_vec_.data();
results_.best_x_pos = best_x_pos_vec_.data();
results_.best_y_pos = best_y_pos_vec_.data();
results_.best_route = best_route_vec_.data();
results_.best_cost = best_cost_scalar_.data();
}
void TSP::reset_batch()
{
mylock_scalar_.set_value_to_zero_async(stream_);
auto const max{std::numeric_limits<int>::max()};
best_cost_scalar_.set_value_async(max, stream_);
}
void TSP::get_initial_solution(int const batch)
{
if (!beam_search_) {
hipLaunchKernelGGL(( random_init), dim3(restart_batch_), dim3(best_thread_num_), 0, 0,
work_, x_pos_, y_pos_, vtx_ptr_, nstart_, nodes_, batch, restart_batch_);
CHECK_CUDA(stream_);
} else {
hipLaunchKernelGGL(( knn_init), dim3(restart_batch_), dim3(best_thread_num_), 0, 0,
work_, x_pos_, y_pos_, vtx_ptr_, neighbors_, nstart_, nodes_, k_, batch, restart_batch_);
CHECK_CUDA(stream_);
}
}
float TSP::compute()
{
float final_cost = 0.f;
int num_restart_batches = (restarts_ + restart_batch_ - 1) / restart_batch_;
int restart_resid = restarts_ - (num_restart_batches - 1) * restart_batch_;
int global_best = std::numeric_limits<int>::max();
int best = 0;
std::vector<float> h_x_pos;
std::vector<float> h_y_pos;
std::vector<int> h_route;
h_x_pos.reserve(nodes_ + 1);
h_y_pos.reserve(nodes_ + 1);
h_route.reserve(nodes_);
std::vector<float*> addr_best_x_pos(1);
std::vector<float*> addr_best_y_pos(1);
std::vector<int*> addr_best_route(1);
HighResTimer hr_timer;
auto create_timer = [&hr_timer, this](char const* name) {
return VerboseTimer(name, hr_timer, verbose_);
};
if (verbose_) {
std::cout << "Doing " << num_restart_batches << " batches of size " << restart_batch_
<< ", with " << restart_resid << " tail\n";
std::cout << "configuration: " << nodes_ << " nodes, " << restarts_ << " restart\n";
std::cout << "optimizing graph with kswap = " << kswaps << "\n";
}
// Tell the cache how we want it to behave
hipFuncSetCacheConfig(search_solution, hipFuncCachePreferEqual);
best_thread_num_ = best_thread_count(nodes_, max_threads_, sm_count_, warp_size_);
if (verbose_) std::cout << "Calculated best thread number = " << best_thread_num_ << "\n";
if (beam_search_) {
auto timer = create_timer("knn");
knn();
}
for (auto batch = 0; batch < num_restart_batches; ++batch) {
reset_batch();
if (batch == num_restart_batches - 1) restart_batch_ = restart_resid;
{
auto timer = create_timer("initial_sol");
get_initial_solution(batch);
}
{
auto timer = create_timer("search_sol");
hipLaunchKernelGGL(( search_solution), dim3(restart_batch_),
dim3(best_thread_num_),
sizeof(int) * best_thread_num_,
stream_,
results_, mylock_, vtx_ptr_, beam_search_, k_, nodes_, x_pos_, y_pos_, work_, nstart_);
CHECK_CUDA(stream_);
}
{
auto timer = create_timer("optimal_tour");
hipLaunchKernelGGL(( get_optimal_tour), dim3(restart_batch_),
dim3(best_thread_num_),
sizeof(int) * best_thread_num_,
stream_, results_, mylock_, work_, nodes_);
CHECK_CUDA(stream_);
}
hipDeviceSynchronize();
best = best_cost_scalar_.value(stream_);
if (verbose_) std::cout << "Best reported by kernel = " << best << "\n";
if (best < global_best) {
global_best = best;
raft::update_host(addr_best_x_pos.data(), results_.best_x_pos, 1, stream_);
raft::update_host(addr_best_y_pos.data(), results_.best_y_pos, 1, stream_);
raft::update_host(addr_best_route.data(), results_.best_route, 1, stream_);
CUDA_TRY(hipStreamSynchronize(stream_));
raft::copy(h_x_pos.data(), addr_best_x_pos[0], nodes_ + 1, stream_);
raft::copy(h_y_pos.data(), addr_best_y_pos[0], nodes_ + 1, stream_);
raft::copy(h_route.data(), addr_best_route[0], nodes_, stream_);
raft::copy(route_, addr_best_route[0], nodes_, stream_);
CHECK_CUDA(stream_);
}
}
for (auto i = 0; i < nodes_; ++i) {
if (verbose_) { std::cout << h_route[i] << ": " << h_x_pos[i] << " " << h_y_pos[i] << "\n"; }
final_cost += euclidean_dist(h_x_pos.data(), h_y_pos.data(), i, i + 1);
}
if (verbose_) {
hr_timer.display(std::cout);
std::cout << "Optimized tour length = " << global_best << "\n";
}
return final_cost;
}
void TSP::knn()
{
if (verbose_) std::cout << "Looking at " << k_ << " nearest neighbors\n";
int dim = 2;
bool row_major_order = false;
rmm::device_uvector<float> input(nodes_ * dim, stream_);
float* input_ptr = input.data();
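// pack the positions as a column-major nodes_ x 2 matrix (x column followed
// by the y column); row_major_order is false in the brute_force_knn call below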
raft::copy(input_ptr, x_pos_, nodes_, stream_);
raft::copy(input_ptr + nodes_, y_pos_, nodes_, stream_);
rmm::device_uvector<float> search_data(nodes_ * dim, stream_);
float* search_data_ptr = search_data.data();
raft::copy(search_data_ptr, input_ptr, nodes_ * dim, stream_);
rmm::device_uvector<float> distances(nodes_ * (k_ + 1), stream_);
float* distances_ptr = distances.data();
std::vector<float*> input_vec;
std::vector<int> sizes_vec;
input_vec.push_back(input_ptr);
sizes_vec.push_back(nodes_);
// k neighbors + 1 is needed because the nearest neighbor of each point is
// the point itself that we don't want to take into account.
raft::spatial::knn::brute_force_knn(handle_,
input_vec,
sizes_vec,
dim,
search_data_ptr,
nodes_,
neighbors_,
distances_ptr,
k_ + 1,
row_major_order,
row_major_order);
}
} // namespace detail
float traveling_salesperson(raft::handle_t const& handle,
int const* vtx_ptr,
float const* x_pos,
float const* y_pos,
int nodes,
int restarts,
bool beam_search,
int k,
int nstart,
bool verbose,
int* route)
{
RAFT_EXPECTS(route != nullptr, "route should equal the number of nodes");
RAFT_EXPECTS(nodes > 0, "nodes should be strictly positive");
RAFT_EXPECTS(restarts > 0, "restarts should be strictly positive");
RAFT_EXPECTS(nstart >= 0 && nstart < nodes, "nstart should be between 0 and nodes - 1");
RAFT_EXPECTS(k > 0, "k should be strictly positive");
cugraph::detail::TSP tsp(
handle, vtx_ptr, x_pos, y_pos, nodes, restarts, beam_search, k, nstart, verbose, route);
return tsp.compute();
}
} // namespace cugraph
| 9917f4afd6035855b56984f6750f2c027d909d49.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <numeric>
#include <raft/spatial/knn/knn.hpp>
#include <utilities/high_res_timer.hpp>
#include <rmm/cuda_stream_view.hpp>
#include "tsp.hpp"
#include "tsp_solver.hpp"
namespace cugraph {
namespace detail {
TSP::TSP(raft::handle_t const& handle,
int const* vtx_ptr,
float const* x_pos,
float const* y_pos,
int nodes,
int restarts,
bool beam_search,
int k,
int nstart,
bool verbose,
int* route)
: handle_(handle),
vtx_ptr_(vtx_ptr),
x_pos_(x_pos),
y_pos_(y_pos),
nodes_(nodes),
restarts_(restarts),
beam_search_(beam_search),
k_(k),
nstart_(nstart),
verbose_(verbose),
route_(route),
stream_(handle_.get_stream()),
max_blocks_(handle_.get_device_properties().maxGridSize[0]),
max_threads_(handle_.get_device_properties().maxThreadsPerBlock),
warp_size_(handle_.get_device_properties().warpSize),
sm_count_(handle_.get_device_properties().multiProcessorCount),
restart_batch_(8192),
mylock_scalar_(stream_),
best_cost_scalar_(stream_),
neighbors_vec_((k_ + 1) * nodes_, stream_),
work_vec_(restart_batch_ * ((4 * nodes_ + 3 + warp_size_ - 1) / warp_size_ * warp_size_),
stream_),
best_x_pos_vec_(1, stream_),
best_y_pos_vec_(1, stream_),
best_route_vec_(1, stream_)
{
setup();
}
void TSP::setup()
{
mylock_ = mylock_scalar_.data();
neighbors_ = neighbors_vec_.data();
// pre-allocate workspace for climbs, each block needs a separate permutation space and search
// buffer. We allocate a work buffer that will store the computed distances, px, py and the route.
// We align it on the warp size.
work_ = work_vec_.data();
results_.best_x_pos = best_x_pos_vec_.data();
results_.best_y_pos = best_y_pos_vec_.data();
results_.best_route = best_route_vec_.data();
results_.best_cost = best_cost_scalar_.data();
}
void TSP::reset_batch()
{
mylock_scalar_.set_value_to_zero_async(stream_);
auto const max{std::numeric_limits<int>::max()};
best_cost_scalar_.set_value_async(max, stream_);
}
void TSP::get_initial_solution(int const batch)
{
if (!beam_search_) {
random_init<<<restart_batch_, best_thread_num_>>>(
work_, x_pos_, y_pos_, vtx_ptr_, nstart_, nodes_, batch, restart_batch_);
CHECK_CUDA(stream_);
} else {
knn_init<<<restart_batch_, best_thread_num_>>>(
work_, x_pos_, y_pos_, vtx_ptr_, neighbors_, nstart_, nodes_, k_, batch, restart_batch_);
CHECK_CUDA(stream_);
}
}
float TSP::compute()
{
float final_cost = 0.f;
int num_restart_batches = (restarts_ + restart_batch_ - 1) / restart_batch_;
int restart_resid = restarts_ - (num_restart_batches - 1) * restart_batch_;
int global_best = std::numeric_limits<int>::max();
int best = 0;
std::vector<float> h_x_pos;
std::vector<float> h_y_pos;
std::vector<int> h_route;
h_x_pos.reserve(nodes_ + 1);
h_y_pos.reserve(nodes_ + 1);
h_route.reserve(nodes_);
std::vector<float*> addr_best_x_pos(1);
std::vector<float*> addr_best_y_pos(1);
std::vector<int*> addr_best_route(1);
HighResTimer hr_timer;
auto create_timer = [&hr_timer, this](char const* name) {
return VerboseTimer(name, hr_timer, verbose_);
};
if (verbose_) {
std::cout << "Doing " << num_restart_batches << " batches of size " << restart_batch_
<< ", with " << restart_resid << " tail\n";
std::cout << "configuration: " << nodes_ << " nodes, " << restarts_ << " restart\n";
std::cout << "optimizing graph with kswap = " << kswaps << "\n";
}
// Tell the cache how we want it to behave
cudaFuncSetCacheConfig(search_solution, cudaFuncCachePreferEqual);
best_thread_num_ = best_thread_count(nodes_, max_threads_, sm_count_, warp_size_);
if (verbose_) std::cout << "Calculated best thread number = " << best_thread_num_ << "\n";
if (beam_search_) {
auto timer = create_timer("knn");
knn();
}
for (auto batch = 0; batch < num_restart_batches; ++batch) {
reset_batch();
if (batch == num_restart_batches - 1) restart_batch_ = restart_resid;
{
auto timer = create_timer("initial_sol");
get_initial_solution(batch);
}
{
auto timer = create_timer("search_sol");
search_solution<<<restart_batch_,
best_thread_num_,
sizeof(int) * best_thread_num_,
stream_>>>(
results_, mylock_, vtx_ptr_, beam_search_, k_, nodes_, x_pos_, y_pos_, work_, nstart_);
CHECK_CUDA(stream_);
}
{
auto timer = create_timer("optimal_tour");
get_optimal_tour<<<restart_batch_,
best_thread_num_,
sizeof(int) * best_thread_num_,
stream_>>>(results_, mylock_, work_, nodes_);
CHECK_CUDA(stream_);
}
cudaDeviceSynchronize();
best = best_cost_scalar_.value(stream_);
if (verbose_) std::cout << "Best reported by kernel = " << best << "\n";
if (best < global_best) {
global_best = best;
raft::update_host(addr_best_x_pos.data(), results_.best_x_pos, 1, stream_);
raft::update_host(addr_best_y_pos.data(), results_.best_y_pos, 1, stream_);
raft::update_host(addr_best_route.data(), results_.best_route, 1, stream_);
CUDA_TRY(cudaStreamSynchronize(stream_));
raft::copy(h_x_pos.data(), addr_best_x_pos[0], nodes_ + 1, stream_);
raft::copy(h_y_pos.data(), addr_best_y_pos[0], nodes_ + 1, stream_);
raft::copy(h_route.data(), addr_best_route[0], nodes_, stream_);
raft::copy(route_, addr_best_route[0], nodes_, stream_);
CHECK_CUDA(stream_);
}
}
for (auto i = 0; i < nodes_; ++i) {
if (verbose_) { std::cout << h_route[i] << ": " << h_x_pos[i] << " " << h_y_pos[i] << "\n"; }
final_cost += euclidean_dist(h_x_pos.data(), h_y_pos.data(), i, i + 1);
}
if (verbose_) {
hr_timer.display(std::cout);
std::cout << "Optimized tour length = " << global_best << "\n";
}
return final_cost;
}
void TSP::knn()
{
if (verbose_) std::cout << "Looking at " << k_ << " nearest neighbors\n";
int dim = 2;
bool row_major_order = false;
rmm::device_uvector<float> input(nodes_ * dim, stream_);
float* input_ptr = input.data();
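// pack the positions as a column-major nodes_ x 2 matrix (x column followed
// by the y column); row_major_order is false in the brute_force_knn call below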
raft::copy(input_ptr, x_pos_, nodes_, stream_);
raft::copy(input_ptr + nodes_, y_pos_, nodes_, stream_);
rmm::device_uvector<float> search_data(nodes_ * dim, stream_);
float* search_data_ptr = search_data.data();
raft::copy(search_data_ptr, input_ptr, nodes_ * dim, stream_);
rmm::device_uvector<float> distances(nodes_ * (k_ + 1), stream_);
float* distances_ptr = distances.data();
std::vector<float*> input_vec;
std::vector<int> sizes_vec;
input_vec.push_back(input_ptr);
sizes_vec.push_back(nodes_);
// k neighbors + 1 is needed because the nearest neighbor of each point is
// the point itself that we don't want to take into account.
raft::spatial::knn::brute_force_knn(handle_,
input_vec,
sizes_vec,
dim,
search_data_ptr,
nodes_,
neighbors_,
distances_ptr,
k_ + 1,
row_major_order,
row_major_order);
}
} // namespace detail
float traveling_salesperson(raft::handle_t const& handle,
int const* vtx_ptr,
float const* x_pos,
float const* y_pos,
int nodes,
int restarts,
bool beam_search,
int k,
int nstart,
bool verbose,
int* route)
{
RAFT_EXPECTS(route != nullptr, "route should equal the number of nodes");
RAFT_EXPECTS(nodes > 0, "nodes should be strictly positive");
RAFT_EXPECTS(restarts > 0, "restarts should be strictly positive");
RAFT_EXPECTS(nstart >= 0 && nstart < nodes, "nstart should be between 0 and nodes - 1");
RAFT_EXPECTS(k > 0, "k should be strictly positive");
cugraph::detail::TSP tsp(
handle, vtx_ptr, x_pos, y_pos, nodes, restarts, beam_search, k, nstart, verbose, route);
return tsp.compute();
}
} // namespace cugraph
|
df4638641f250693d6cdb1f064ed647df6ba3798.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <assert.h>
int number_count = 640; // number of ints to allocate
const int allocation_size = number_count * sizeof(int); // number_count * 4 B
void* cpu_p;
void* gpu_p;
void cpuAlloc() // allocate allocation_size bytes of RAM
{
cpu_p = malloc(allocation_size);
assert(nullptr != cpu_p);
}
void gpuAlloc() // allocate allocation_size bytes of GPU memory
{
hipError_t result = hipMalloc(&gpu_p, allocation_size);
assert(result == hipSuccess);
}
void cpuMemoryToGpuMemory() // copy the cpu memory region (RAM) to gpu memory
{
hipError_t result = hipMemcpy(gpu_p, cpu_p, allocation_size, hipMemcpyHostToDevice); // cpu memory to gpu memory
assert(result == hipSuccess);
}
void gpuMemoryToCpuMemory() // copy the gpu memory region to cpu memory
{
hipError_t result = hipMemcpy(cpu_p, gpu_p, allocation_size, hipMemcpyDeviceToHost); // gpu memory to cpu memory
assert(result == hipSuccess);
}
void cpuSetNumbers() // write number_count values into the cpu memory region
{
int* cpu_int32 = (int*)cpu_p;
for (int i = 0; i < number_count; i++)
cpu_int32[i] = i;
}
__global__ void gpuAdd(int* gpu_numbers) // the part processed in parallel; it is compiled here by nvcc
{
int threadId = threadIdx.x; // a function running on the GPU has a built-in variable named threadIdx
// threadIdx starts at 0 and runs up to the number of launched threads
gpu_numbers[threadId] *= 2; // double every value at the gpu_numbers address
}
void printCpuNumbers()
{
int* cpu_int32 = (int*)cpu_p;
for (size_t i = 0; i < number_count; i++) {
printf("%d\t%d\n", i, cpu_int32[i]);
}
}
void cpuFree() // free the cpu memory
{
free(cpu_p);
}
void gpuFree() // free the gpu memory
{
hipError_t result = hipFree(gpu_p);
assert(result == hipSuccess);
}
int main()
{
cpuAlloc();
cpuSetNumbers();
gpuAlloc();
cpuMemoryToGpuMemory();
// data is processed in parallel using GPU memory..
hipLaunchKernelGGL(( gpuAdd) , dim3(1), dim3(number_count) , 0, 0, (int*)gpu_p);
// all operations on the GPU are performed asynchronously...
hipError_t result = hipDeviceSynchronize(); // hipDeviceSynchronize waits for all operations to finish..
assert(result == hipSuccess); // result == hipSuccess (0) means the call succeeded
gpuMemoryToCpuMemory(); // copy every value processed in GPU memory back to the cpu memory region
printCpuNumbers();
gpuFree();
cpuFree();
system("pause");
return 0;
} | df4638641f250693d6cdb1f064ed647df6ba3798.cu | #include<iostream>
#include<stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <assert.h>
int number_count = 640; // number of ints to allocate
const int allocation_size = number_count * sizeof(int); // number_count * 4 B
void* cpu_p;
void* gpu_p;
void cpuAlloc() // allocate allocation_size bytes of RAM
{
cpu_p = malloc(allocation_size);
assert(nullptr != cpu_p);
}
void gpuAlloc() // allocate allocation_size bytes of GPU memory
{
cudaError_t result = cudaMalloc(&gpu_p, allocation_size);
assert(result == cudaSuccess);
}
void cpuMemoryToGpuMemory() // copy the cpu memory region (RAM) to gpu memory
{
cudaError_t result = cudaMemcpy(gpu_p, cpu_p, allocation_size, cudaMemcpyHostToDevice); // cpu memory to gpu memory
assert(result == cudaSuccess);
}
void gpuMemoryToCpuMemory() // copy the gpu memory region to cpu memory
{
cudaError_t result = cudaMemcpy(cpu_p, gpu_p, allocation_size, cudaMemcpyDeviceToHost); // gpu memory to cpu memory
assert(result == cudaSuccess);
}
void cpuSetNumbers() // write number_count values into the cpu memory region
{
int* cpu_int32 = (int*)cpu_p;
for (int i = 0; i < number_count; i++)
cpu_int32[i] = i;
}
__global__ void gpuAdd(int* gpu_numbers) // the part processed in parallel; it is compiled here by nvcc
{
int threadId = threadIdx.x; // a function running on the GPU has a built-in variable named threadIdx
// threadIdx starts at 0 and runs up to the number of launched threads
gpu_numbers[threadId] *= 2; // double every value at the gpu_numbers address
}
void printCpuNumbers()
{
int* cpu_int32 = (int*)cpu_p;
for (size_t i = 0; i < number_count; i++) {
printf("%d\t%d\n", i, cpu_int32[i]);
}
}
void cpuFree() // free the cpu memory
{
free(cpu_p);
}
void gpuFree() // free the gpu memory
{
cudaError_t result = cudaFree(gpu_p);
assert(result == cudaSuccess);
}
int main()
{
cpuAlloc();
cpuSetNumbers();
gpuAlloc();
cpuMemoryToGpuMemory();
// data is processed in parallel using GPU memory..
gpuAdd <<< 1, number_count >>> ((int*)gpu_p);
// all operations on the GPU are performed asynchronously...
cudaError_t result = cudaDeviceSynchronize(); // cudaDeviceSynchronize waits for all operations to finish..
assert(result == cudaSuccess); // result == cudaSuccess (0) means the call succeeded
gpuMemoryToCpuMemory(); // copy every value processed in GPU memory back to the cpu memory region
printCpuNumbers();
gpuFree();
cpuFree();
system("pause");
} |
cb959fc3b0cc00ff6865b89c440169829429fb94.hip | // !!! This is a file automatically generated by hipify!!!
//===--------------------------- tensor_utils.cu --------------------------===//
//
//===----------------------------------------------------------------------===//
//
// This file consists of the custom implementation of utility functions
// useful for approximated and non-approximated versions of tensor operations.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <ctime>
#include <cfloat>
#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <rocblas.h>
#include <cudnn.h>
#include <cublas_api.h>
#include <vector>
#include "tensor_utils.h"
#include "tensor_runtime.h"
#include "debug.h"
#include "tensor.h"
#include "global_data.h"
#include "fp16_gemm.h"
extern "C" {
void *deepCopy(void * tensor_ptr) {
struct Tensor *original_tensor = (struct Tensor*) tensor_ptr;
struct Tensor *new_tensor = (struct Tensor *)malloc(sizeof(Tensor));
allocateMem(new_tensor, original_tensor->data_type, original_tensor->num_elems);
tensorCopy(original_tensor, new_tensor);
new_tensor->dims.num_dims = original_tensor->dims.num_dims;
new_tensor->dims.dim_sizes = (size_t *) malloc(sizeof(size_t) * original_tensor->dims.num_dims);
for(int i = 0; i < original_tensor->dims.num_dims; ++i) {
new_tensor->dims.dim_sizes[i] = original_tensor->dims.dim_sizes[i];
}
return (void*) new_tensor;
}
void freeTensor(void *tensor_ptr) {
Tensor *tensor = (Tensor *)tensor_ptr;
tensors_ptr.erase(tensor->gpu_data);
tensors_ptr.erase(tensor->gpu_half_data);
host_ptr.erase(tensor->host_data);
hipFree(tensor->gpu_data);
tensor->gpu_data = nullptr;
hipFree(tensor->gpu_half_data);
tensor->gpu_half_data = nullptr;
free(tensor->host_data);
tensor->host_data = nullptr;
}
// Returns the size of the target datatype
int getTypeSize(int data_type) {
// TODO: Add support for more data types
switch (data_type) {
case float_type:
return 4;
case double_type:
return 8;
case half_type:
return 2;
case int_type:
return 1;
case float2_type:
return 8;
case half2_type:
return 4;
default:
ERROR("Unknown type %d\n", data_type);
}
return 0;
}
static int getFullPrecTypeSize(int data_type) {
switch (data_type) {
case float_type:
case half_type:
return 4;
case double_type:
return 8;
case int_type:
return 1;
case float2_type:
case half2_type:
return 8;
default:
ERROR("Unknown type %d\n", data_type);
}
return 0;
}
static bool isFP16Compound(int data_type) {
return data_type == half_type || data_type == half2_type;
}
void setSizeInBytes(struct Tensor *tensor, int data_type, size_t num_elems) {
int type_size = getTypeSize(data_type);
size_t size_in_bytes = type_size * num_elems;
tensor->size_in_bytes = size_in_bytes;
DEBUG("***--- size_in_bytes = %d \n", size_in_bytes);
}
// NOTE: Always allocates FP32 on Host, FP32/FP16 for Device (GPU)
void allocateMem(struct Tensor *tensor, int data_type, size_t num_elems) {
setSizeInBytes(tensor, data_type, num_elems);
tensor->data_type = data_type;
tensor->cur_type =
data_type; // type maintained for handling FP32 <-> FP16 conversions
tensor->num_elems = num_elems;
size_t size_on_host =
num_elems * getFullPrecTypeSize(data_type); // NOTE: On host, always FP32
tensor->host_data =
(void *)malloc(size_on_host); // Allocate memory on the host
tensor->data_placement = HOST; // By default data is on the host
DEBUG("Attempting to Allocate = %lu \n\n\n", tensor->size_in_bytes);
if (isFP16Compound(data_type)) {
// Allocate FP16-like
checkCUDA(hipMalloc(&tensor->gpu_half_data, tensor->size_in_bytes));
tensors_ptr.insert(tensor->gpu_half_data);
tensor->gpu_data = nullptr;
} else {
// Allocate FP32-like, or int
checkCUDA(hipMalloc(&tensor->gpu_data, tensor->size_in_bytes));
tensors_ptr.insert(tensor->gpu_data);
tensor->gpu_half_data = nullptr;
}
tracked_tensors[tensor] = 1; // For FP16-FP32 data handling
host_ptr.insert(tensor->host_data);
obj_ptr.insert(tensor);
// host_ptr.push_back(tensor->host_data);
}
/// Two tensor formats are supported: NCHW and NHWC.
/// TODO: Make this more general in the future.
///
void setCudnnDataFormat(struct Tensor *tensor, int data_format) {
switch (data_format) {
case 0:
data_format = CUDNN_TENSOR_NCHW;
break;
case 1:
data_format = CUDNN_TENSOR_NHWC;
break;
default:
break;
}
tensor->data_format = data_format;
DEBUG("tensor->data_format = %d \n", tensor->data_format);
}
void set4DFilterDescriptor(struct Tensor *tensor, int data_format,
size_t dim1_size, size_t dim2_size, size_t dim3_size,
size_t dim4_size) {
setCudnnDataFormat(tensor, data_format);
checkCUDNN(cudnnCreateFilterDescriptor(&tensor->filter_desc));
checkCUDNN(cudnnCreateFilterDescriptor(&tensor->filter_half_desc));
checkCUDNN(cudnnSetFilter4dDescriptor(
tensor->filter_desc,
(cudnnDataType_t)CUDNN_DATA_FLOAT, // tensor->data_type,
(cudnnTensorFormat_t)tensor->data_format, dim1_size, dim2_size, dim3_size,
dim4_size));
checkCUDNN(cudnnSetFilter4dDescriptor(
tensor->filter_half_desc, (cudnnDataType_t)CUDNN_DATA_HALF,
(cudnnTensorFormat_t)tensor->data_format, dim1_size, dim2_size, dim3_size,
dim4_size));
}
void set4DTensorDescriptor(struct Tensor *tensor, int data_format,
size_t dim1_size, size_t dim2_size, size_t dim3_size,
size_t dim4_size) {
setCudnnDataFormat(tensor, data_format);
checkCUDNN(cudnnCreateTensorDescriptor(&tensor->tensor_desc));
checkCUDNN(cudnnCreateTensorDescriptor(&tensor->tensor_half_desc));
// For certain operations, the strides may need to change - in which case the
// descriptor needs to be reinitialized
cudnnSetTensor4dDescriptor(
tensor->tensor_desc,
(cudnnTensorFormat_t)tensor->data_format, // Data format
(cudnnDataType_t)CUDNN_DATA_FLOAT, // tensor->data_type, // Data type
dim1_size, dim2_size, dim3_size, dim4_size);
cudnnSetTensor4dDescriptor(
tensor->tensor_half_desc,
(cudnnTensorFormat_t)tensor->data_format, // Data format
(cudnnDataType_t)CUDNN_DATA_HALF, // Data type
dim1_size, dim2_size, dim3_size, dim4_size);
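// Note: an FP32 and an FP16 descriptor are both created up front for the same tensor,
// presumably so later operations can bind whichever one matches the tensor's cur_type.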
cudnnDataType_t dType;
int nStride, cStride, hStride, wStride;
int size1, size2, size3, size4;
cudnnGetTensor4dDescriptor(tensor->tensor_desc, &dType, &size1, &size2,
&size3, &size4, &nStride, &cStride, &hStride,
&wStride);
DEBUG("nStride = %d, cStride = %d, hStride = %d, wStride = %d \n", nStride,
cStride, hStride, wStride);
}
// FIXIT: Striding still not working - hence 2D and 3D tensor support is missing
void setTensorDescriptor(struct Tensor *tensor, int num_dims,
size_t *dim_sizes) {
checkCUDNN(cudnnCreateTensorDescriptor(&tensor->tensor_desc));
int *strides = (int *)malloc(sizeof(int) * num_dims);
strides[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
strides[i] = strides[i + 1] * dim_sizes[i + 1];
}
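// Worked example: for dim_sizes = {2, 3, 4} this loop yields strides = {12, 4, 1},
// i.e. a densely packed row-major layout.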
for (int i = 0; i < num_dims; i++) {
DEBUG("strides[%d] = %d \n", i, strides[i]);
}
int *const_dims = (int *)malloc(sizeof(int) * num_dims);
for (int j = 0; j < num_dims; j++) {
const_dims[j] = (int)dim_sizes[j];
DEBUG("const_dim = %d \n", const_dims[j]);
}
DEBUG("data_type = %d, cuDNN_value = %d \n", tensor->data_type,
CUDNN_DATA_FLOAT);
// For certain operations, the strides may need to change - in which case the
// descriptor needs to be reinitialized
checkCUDNN(cudnnSetTensorNdDescriptor(
tensor->tensor_desc,
(cudnnDataType_t)tensor->data_type, // Data type
num_dims, (const int *)const_dims, (const int *)strides));
}
/// HPVM tensor runtime allows creation of 2D, 3D and 4D tensors.
void *create2DTensor(int data_type, size_t dim1_size, size_t dim2_size) {
struct Tensor *tensor = (struct Tensor *)malloc(sizeof(Tensor));
size_t num_elems = dim1_size * dim2_size;
allocateMem(tensor, data_type, num_elems);
// Setting the tensor dimensions
size_t *dim_sizes = (size_t *)malloc(sizeof(size_t) * 2);
dim_sizes[0] = dim1_size;
dim_sizes[1] = dim2_size;
tensor->dims.dim_sizes = dim_sizes;
tensor->dims.num_dims = 2;
return tensor;
}
void *create3DTensor(int data_type, size_t dim1_size, size_t dim2_size,
size_t dim3_size) {
struct Tensor *tensor = (struct Tensor *)malloc(sizeof(Tensor));
size_t num_elems = dim1_size * dim2_size * dim3_size;
allocateMem(tensor, data_type, num_elems);
// Setting the tensor dimensions
size_t *dim_sizes = (size_t *)malloc(sizeof(size_t) * 3);
dim_sizes[0] = dim1_size;
dim_sizes[1] = dim2_size;
dim_sizes[2] = dim3_size;
tensor->dims.dim_sizes = dim_sizes;
tensor->dims.num_dims = 3;
return tensor;
}
void *create4DTensor(int data_type, int data_format, size_t dim1_size,
size_t dim2_size, size_t dim3_size, size_t dim4_size) {
struct Tensor *tensor = (struct Tensor *)malloc(sizeof(Tensor));
size_t num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
allocateMem(tensor, data_type, num_elems);
// Setting the tensor dimensions
size_t *dim_sizes = (size_t *)malloc(sizeof(size_t) * 4);
dim_sizes[0] = dim1_size;
dim_sizes[1] = dim2_size;
dim_sizes[2] = dim3_size;
dim_sizes[3] = dim4_size;
tensor->dims.dim_sizes = dim_sizes;
tensor->dims.num_dims = 4;
// Done setting tensor dimensions
// setTensorDescriptor(tensor, 4, dim_sizes);
set4DTensorDescriptor(tensor, data_format, dim1_size, dim2_size, dim3_size,
dim4_size);
// FIXIT: filter descriptor should be invoked only for filters
set4DFilterDescriptor(tensor, data_format, dim1_size, dim2_size, dim3_size,
dim4_size);
changeTensorPlacement(tensor, HOST);
return tensor;
}
void initTensorData(void *tensor_ptr, void *data_ptr, size_t size_in_bytes) {
Tensor *tensor = (Tensor *) tensor_ptr;
size_t host_size_in_bytes = tensor->num_elems * 4;
if (host_size_in_bytes != size_in_bytes) {
ERROR("The destination and source sizes don't match");
}
std::memcpy(tensor->host_data, data_ptr, size_in_bytes);
changeTensorPlacement(tensor, HOST);
tensor->cur_type = float_type;
}
void hostToDeviceCopy(struct Tensor *tensor) {
DEBUG("** HostToDevice *** \n");
if (tensor->data_placement != DEVICE) {
hipMemcpy(tensor->gpu_data, tensor->host_data, tensor->size_in_bytes,
hipMemcpyHostToDevice);
DEBUG("Moving %d bytes from host to GPU \n", tensor->size_in_bytes);
tensor->data_placement = DEVICE;
}
else {
DEBUG("No data movement required - Data on Device \n");
}
}
void deviceToHostCopy(struct Tensor *tensor) {
DEBUG("*** DeviceToHost *** ");
if (tensor->data_placement != HOST) {
hipMemcpy(tensor->host_data, tensor->gpu_data, tensor->size_in_bytes,
hipMemcpyDeviceToHost);
DEBUG("Moving %d bytes from GPU to host \n", tensor->size_in_bytes);
tensor->data_placement = HOST;
}
else {
DEBUG("No data movement required - Data on Host \n");
}
}
void tensorCopy(void *srcTensor_ptr, void *dstTensor_ptr) {
struct Tensor *srcTensor = (struct Tensor *)srcTensor_ptr;
struct Tensor *dstTensor = (struct Tensor *)dstTensor_ptr;
if (srcTensor->data_placement == HOST) {
memcpy(dstTensor->host_data, srcTensor->host_data,
srcTensor->size_in_bytes);
DEBUG("Moving %d bytes from host to host \n", srcTensor->size_in_bytes);
dstTensor->data_placement = HOST;
}
else if (srcTensor->data_placement == DEVICE) {
hipMemcpy(dstTensor->gpu_data, srcTensor->gpu_data,
srcTensor->size_in_bytes, hipMemcpyDeviceToDevice);
DEBUG("Moving %d bytes from GPU to GPU \n", srcTensor->size_in_bytes);
dstTensor->data_placement = DEVICE;
}
}
void hpvm_request_tensor(void *tensor_ptr, int destination) {
Tensor *tensor = (Tensor *)tensor_ptr;
// If destination is the host
if (destination == 0) {
if (tensor->data_placement != HOST) {
hipMemcpy(tensor->host_data, tensor->gpu_data, tensor->size_in_bytes,
hipMemcpyDeviceToHost);
DEBUG("Moving %d bytes from GPU to host \n", tensor->size_in_bytes);
tensor->data_placement = HOST;
}
else {
DEBUG("No data movement required - Data on Host \n");
}
}
// If destination is the GPU
else if (destination == 1) {
if (tensor->data_placement != DEVICE) {
hipMemcpy(tensor->gpu_data, tensor->host_data, tensor->size_in_bytes,
hipMemcpyHostToDevice);
DEBUG("Moving %d bytes from host to GPU \n", tensor->size_in_bytes);
tensor->data_placement = DEVICE;
}
else {
DEBUG("No data movement required - Data on Device \n");
}
}
}
void convertToFP16(struct Tensor *tensor) {
if (tensor == NULL)
return;
if (tensor->cur_type == half_type)
return;
DEBUG("ConvertoFP16 \n");
setSizeInBytes(tensor, half_type, tensor->num_elems);
size_t size_in_bytes = tensor->size_in_bytes;
DEBUG("size_in_bytes = %d \n", size_in_bytes);
if (tensor->gpu_half_data == NULL)
checkCudaErrors(hipMalloc(&tensor->gpu_half_data,
size_in_bytes)); // Allocate memory on GPU
// If Tensor is one of Tracked (has to free per batch) then track all data
// types
if (tracked_tensors.find(tensor) != tracked_tensors.end())
tensors_ptr.insert(tensor->gpu_half_data);
f2h((float *)tensor->gpu_data, tensor->num_elems,
(half *)tensor->gpu_half_data);
tensor->cur_type = half_type;
}
void convertToFP32(struct Tensor *tensor) {
if (tensor == NULL)
return;
// Need this check for both offline and online profiling path
if (tensor->cur_type == float_type)
return;
DEBUG("ConvertoFP32 \n");
setSizeInBytes(tensor, float_type, tensor->num_elems);
size_t size_in_bytes = tensor->size_in_bytes;
// If FP32 data array doesn't exist, allocate
if (tensor->gpu_data == NULL) {
checkCudaErrors(
hipMalloc(&tensor->gpu_data, size_in_bytes)); // Allocate memory on GPU
DEBUG("NOTE: Allocating new FP32 Array with size = %lu \n", size_in_bytes);
}
// If Tensor is one of Tracked (has to free per batch) then track all data
// types
if (tracked_tensors.find(tensor) != tracked_tensors.end())
tensors_ptr.insert(tensor->gpu_data);
h2f((half *)tensor->gpu_half_data, tensor->num_elems,
(float *)tensor->gpu_data);
tensor->cur_type = float_type;
}
void convertToFP32_offline(struct Tensor *tensor) {
if (tensor == NULL)
return;
if (tensor->cur_type == half_type)
return;
DEBUG("ConvertoFP32 \n");
setSizeInBytes(tensor, float_type, tensor->num_elems);
size_t size_in_bytes = tensor->size_in_bytes;
// If FP32 data array doesn't exist, allocate
if (tensor->gpu_data == NULL) {
checkCudaErrors(
hipMalloc(&tensor->gpu_data, size_in_bytes)); // Allocate memory on GPU
DEBUG("NOTE: Allocating new FP32 Array with size = %lu \n", size_in_bytes);
}
// If Tensor is one of Tracked (has to free per batch) then track all data
// types
if (tracked_tensors.find(tensor) != tracked_tensors.end())
tensors_ptr.insert(tensor->gpu_data);
h2f((half *)tensor->gpu_half_data, tensor->num_elems,
(float *)tensor->gpu_data);
tensor->cur_type = float_type;
hipFree(tensor->gpu_half_data);
tensors_ptr.erase(tensor->gpu_half_data);
tensor->gpu_half_data = NULL;
}
// Called from within the runtime to change the data placement
// This routine is required to change the output data placements from host to
// device
void changeTensorPlacement(struct Tensor *tensor,
data_location_t data_placement) {
if (tensor == NULL)
ERROR("Tensor == NULL");
tensor->data_placement = data_placement;
}
} // end of Extern"C"
| cb959fc3b0cc00ff6865b89c440169829429fb94.cu | //===--------------------------- tensor_utils.cu --------------------------===//
//
//===----------------------------------------------------------------------===//
//
// This file consists of the custom implementation of utility functions
// useful for approximated and non-approximated versions of tensor operations.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <ctime>
#include <cfloat>
#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h>
#include <cudnn.h>
#include <cublas_api.h>
#include <vector>
#include "tensor_utils.h"
#include "tensor_runtime.h"
#include "debug.h"
#include "tensor.h"
#include "global_data.h"
#include "fp16_gemm.h"
extern "C" {
void *deepCopy(void * tensor_ptr) {
struct Tensor *original_tensor = (struct Tensor*) tensor_ptr;
struct Tensor *new_tensor = (struct Tensor *)malloc(sizeof(Tensor));
allocateMem(new_tensor, original_tensor->data_type, original_tensor->num_elems);
tensorCopy(original_tensor, new_tensor);
new_tensor->dims.num_dims = original_tensor->dims.num_dims;
new_tensor->dims.dim_sizes = (size_t *) malloc(sizeof(size_t) * original_tensor->dims.num_dims);
for(int i = 0; i < original_tensor->dims.num_dims; ++i) {
new_tensor->dims.dim_sizes[i] = original_tensor->dims.dim_sizes[i];
}
return (void*) new_tensor;
}
void freeTensor(void *tensor_ptr) {
Tensor *tensor = (Tensor *)tensor_ptr;
tensors_ptr.erase(tensor->gpu_data);
tensors_ptr.erase(tensor->gpu_half_data);
host_ptr.erase(tensor->host_data);
cudaFree(tensor->gpu_data);
tensor->gpu_data = nullptr;
cudaFree(tensor->gpu_half_data);
tensor->gpu_half_data = nullptr;
free(tensor->host_data);
tensor->host_data = nullptr;
}
// Returns the size of the target datatype
int getTypeSize(int data_type) {
// TODO: Add support for more data types
switch (data_type) {
case float_type:
return 4;
case double_type:
return 8;
case half_type:
return 2;
case int_type:
return 1;
case float2_type:
return 8;
case half2_type:
return 4;
default:
ERROR("Unknown type %d\n", data_type);
}
return 0;
}
static int getFullPrecTypeSize(int data_type) {
switch (data_type) {
case float_type:
case half_type:
return 4;
case double_type:
return 8;
case int_type:
return 1;
case float2_type:
case half2_type:
return 8;
default:
ERROR("Unknown type %d\n", data_type);
}
return 0;
}
static bool isFP16Compound(int data_type) {
return data_type == half_type || data_type == half2_type;
}
void setSizeInBytes(struct Tensor *tensor, int data_type, size_t num_elems) {
int type_size = getTypeSize(data_type);
size_t size_in_bytes = type_size * num_elems;
tensor->size_in_bytes = size_in_bytes;
DEBUG("***--- size_in_bytes = %d \n", size_in_bytes);
}
// NOTE: Always allocates FP32 on Host, FP32/FP16 for Device (GPU)
void allocateMem(struct Tensor *tensor, int data_type, size_t num_elems) {
setSizeInBytes(tensor, data_type, num_elems);
tensor->data_type = data_type;
tensor->cur_type =
data_type; // type maintained for handling FP32 <-> FP16 conversions
tensor->num_elems = num_elems;
size_t size_on_host =
num_elems * getFullPrecTypeSize(data_type); // NOTE: On host, always FP32
tensor->host_data =
(void *)malloc(size_on_host); // Allocate memory on the host
tensor->data_placement = HOST; // By default data is on the host
DEBUG("Attempting to Allocate = %lu \n\n\n", tensor->size_in_bytes);
if (isFP16Compound(data_type)) {
// Allocate FP16-like
checkCUDA(cudaMalloc(&tensor->gpu_half_data, tensor->size_in_bytes));
tensors_ptr.insert(tensor->gpu_half_data);
tensor->gpu_data = nullptr;
} else {
// Allocate FP32-like, or int
checkCUDA(cudaMalloc(&tensor->gpu_data, tensor->size_in_bytes));
tensors_ptr.insert(tensor->gpu_data);
tensor->gpu_half_data = nullptr;
}
tracked_tensors[tensor] = 1; // For FP16-FP32 data handling
host_ptr.insert(tensor->host_data);
obj_ptr.insert(tensor);
// host_ptr.push_back(tensor->host_data);
}
/// Two tensor formats are supported: NCHW and NHWC.
/// TODO: Make this more general in the future.
///
void setCudnnDataFormat(struct Tensor *tensor, int data_format) {
switch (data_format) {
case 0:
data_format = CUDNN_TENSOR_NCHW;
break;
case 1:
data_format = CUDNN_TENSOR_NHWC;
break;
default:
break;
}
tensor->data_format = data_format;
DEBUG("tensor->data_format = %d \n", tensor->data_format);
}
void set4DFilterDescriptor(struct Tensor *tensor, int data_format,
size_t dim1_size, size_t dim2_size, size_t dim3_size,
size_t dim4_size) {
setCudnnDataFormat(tensor, data_format);
checkCUDNN(cudnnCreateFilterDescriptor(&tensor->filter_desc));
checkCUDNN(cudnnCreateFilterDescriptor(&tensor->filter_half_desc));
checkCUDNN(cudnnSetFilter4dDescriptor(
tensor->filter_desc,
(cudnnDataType_t)CUDNN_DATA_FLOAT, // tensor->data_type,
(cudnnTensorFormat_t)tensor->data_format, dim1_size, dim2_size, dim3_size,
dim4_size));
checkCUDNN(cudnnSetFilter4dDescriptor(
tensor->filter_half_desc, (cudnnDataType_t)CUDNN_DATA_HALF,
(cudnnTensorFormat_t)tensor->data_format, dim1_size, dim2_size, dim3_size,
dim4_size));
}
void set4DTensorDescriptor(struct Tensor *tensor, int data_format,
size_t dim1_size, size_t dim2_size, size_t dim3_size,
size_t dim4_size) {
setCudnnDataFormat(tensor, data_format);
checkCUDNN(cudnnCreateTensorDescriptor(&tensor->tensor_desc));
checkCUDNN(cudnnCreateTensorDescriptor(&tensor->tensor_half_desc));
// For certain operations, the strides may need to change - in which case the
// descriptor needs to be reinitialized
cudnnSetTensor4dDescriptor(
tensor->tensor_desc,
(cudnnTensorFormat_t)tensor->data_format, // Data format
(cudnnDataType_t)CUDNN_DATA_FLOAT, // tensor->data_type, // Data type
dim1_size, dim2_size, dim3_size, dim4_size);
cudnnSetTensor4dDescriptor(
tensor->tensor_half_desc,
(cudnnTensorFormat_t)tensor->data_format, // Data format
(cudnnDataType_t)CUDNN_DATA_HALF, // Data type
dim1_size, dim2_size, dim3_size, dim4_size);
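// Note: an FP32 and an FP16 descriptor are both created up front for the same tensor,
// presumably so later operations can bind whichever one matches the tensor's cur_type.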
cudnnDataType_t dType;
int nStride, cStride, hStride, wStride;
int size1, size2, size3, size4;
cudnnGetTensor4dDescriptor(tensor->tensor_desc, &dType, &size1, &size2,
&size3, &size4, &nStride, &cStride, &hStride,
&wStride);
DEBUG("nStride = %d, cStride = %d, hStride = %d, wStride = %d \n", nStride,
cStride, hStride, wStride);
}
// FIXIT: Striding still not working - hence 2D and 3D tensor support is missing
void setTensorDescriptor(struct Tensor *tensor, int num_dims,
size_t *dim_sizes) {
checkCUDNN(cudnnCreateTensorDescriptor(&tensor->tensor_desc));
int *strides = (int *)malloc(sizeof(int) * num_dims);
strides[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
strides[i] = strides[i + 1] * dim_sizes[i + 1];
}
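// Worked example: for dim_sizes = {2, 3, 4} this loop yields strides = {12, 4, 1},
// i.e. a densely packed row-major layout.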
for (int i = 0; i < num_dims; i++) {
DEBUG("strides[%d] = %d \n", i, strides[i]);
}
int *const_dims = (int *)malloc(sizeof(int) * num_dims);
for (int j = 0; j < num_dims; j++) {
const_dims[j] = (int)dim_sizes[j];
DEBUG("const_dim = %d \n", const_dims[j]);
}
DEBUG("data_type = %d, cuDNN_value = %d \n", tensor->data_type,
CUDNN_DATA_FLOAT);
// For certain operations, the strides may need to change - in which case the
// descriptor needs to be reinitialized
checkCUDNN(cudnnSetTensorNdDescriptor(
tensor->tensor_desc,
(cudnnDataType_t)tensor->data_type, // Data type
num_dims, (const int *)const_dims, (const int *)strides));
}
/// HPVM tensor runtime allows creation of 2D, 3D and 4D tensors.
void *create2DTensor(int data_type, size_t dim1_size, size_t dim2_size) {
struct Tensor *tensor = (struct Tensor *)malloc(sizeof(Tensor));
size_t num_elems = dim1_size * dim2_size;
allocateMem(tensor, data_type, num_elems);
// Setting the tensor dimensions
size_t *dim_sizes = (size_t *)malloc(sizeof(size_t) * 2);
dim_sizes[0] = dim1_size;
dim_sizes[1] = dim2_size;
tensor->dims.dim_sizes = dim_sizes;
tensor->dims.num_dims = 2;
return tensor;
}
void *create3DTensor(int data_type, size_t dim1_size, size_t dim2_size,
size_t dim3_size) {
struct Tensor *tensor = (struct Tensor *)malloc(sizeof(Tensor));
size_t num_elems = dim1_size * dim2_size * dim3_size;
allocateMem(tensor, data_type, num_elems);
// Setting the tensor dimensions
size_t *dim_sizes = (size_t *)malloc(sizeof(size_t) * 3);
dim_sizes[0] = dim1_size;
dim_sizes[1] = dim2_size;
dim_sizes[2] = dim3_size;
tensor->dims.dim_sizes = dim_sizes;
tensor->dims.num_dims = 3;
return tensor;
}
void *create4DTensor(int data_type, int data_format, size_t dim1_size,
size_t dim2_size, size_t dim3_size, size_t dim4_size) {
struct Tensor *tensor = (struct Tensor *)malloc(sizeof(Tensor));
size_t num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
allocateMem(tensor, data_type, num_elems);
// Setting the tensor dimensions
size_t *dim_sizes = (size_t *)malloc(sizeof(size_t) * 4);
dim_sizes[0] = dim1_size;
dim_sizes[1] = dim2_size;
dim_sizes[2] = dim3_size;
dim_sizes[3] = dim4_size;
tensor->dims.dim_sizes = dim_sizes;
tensor->dims.num_dims = 4;
// Done setting tensor dimensions
// setTensorDescriptor(tensor, 4, dim_sizes);
set4DTensorDescriptor(tensor, data_format, dim1_size, dim2_size, dim3_size,
dim4_size);
// FIXIT: filter descriptor should be invoked only for filters
set4DFilterDescriptor(tensor, data_format, dim1_size, dim2_size, dim3_size,
dim4_size);
changeTensorPlacement(tensor, HOST);
return tensor;
}
void initTensorData(void *tensor_ptr, void *data_ptr, size_t size_in_bytes) {
Tensor *tensor = (Tensor *) tensor_ptr;
size_t host_size_in_bytes = tensor->num_elems * 4;
if (host_size_in_bytes != size_in_bytes) {
ERROR("The destination and source sizes don't match");
}
std::memcpy(tensor->host_data, data_ptr, size_in_bytes);
changeTensorPlacement(tensor, HOST);
tensor->cur_type = float_type;
}
void hostToDeviceCopy(struct Tensor *tensor) {
DEBUG("** HostToDevice *** \n");
if (tensor->data_placement != DEVICE) {
cudaMemcpy(tensor->gpu_data, tensor->host_data, tensor->size_in_bytes,
cudaMemcpyHostToDevice);
DEBUG("Moving %d bytes from host to GPU \n", tensor->size_in_bytes);
tensor->data_placement = DEVICE;
}
else {
DEBUG("No data movement required - Data on Device \n");
}
}
void deviceToHostCopy(struct Tensor *tensor) {
DEBUG("*** DeviceToHost *** ");
if (tensor->data_placement != HOST) {
cudaMemcpy(tensor->host_data, tensor->gpu_data, tensor->size_in_bytes,
cudaMemcpyDeviceToHost);
DEBUG("Moving %d bytes from GPU to host \n", tensor->size_in_bytes);
tensor->data_placement = HOST;
}
else {
DEBUG("No data movement required - Data on Host \n");
}
}
void tensorCopy(void *srcTensor_ptr, void *dstTensor_ptr) {
struct Tensor *srcTensor = (struct Tensor *)srcTensor_ptr;
struct Tensor *dstTensor = (struct Tensor *)dstTensor_ptr;
if (srcTensor->data_placement == HOST) {
memcpy(dstTensor->host_data, srcTensor->host_data,
srcTensor->size_in_bytes);
DEBUG("Moving %d bytes from host to host \n", srcTensor->size_in_bytes);
dstTensor->data_placement = HOST;
}
else if (srcTensor->data_placement == DEVICE) {
cudaMemcpy(dstTensor->gpu_data, srcTensor->gpu_data,
srcTensor->size_in_bytes, cudaMemcpyDeviceToDevice);
DEBUG("Moving %d bytes from GPU to GPU \n", srcTensor->size_in_bytes);
dstTensor->data_placement = DEVICE;
}
}
void hpvm_request_tensor(void *tensor_ptr, int destination) {
Tensor *tensor = (Tensor *)tensor_ptr;
// If destination is the host
if (destination == 0) {
if (tensor->data_placement != HOST) {
cudaMemcpy(tensor->host_data, tensor->gpu_data, tensor->size_in_bytes,
cudaMemcpyDeviceToHost);
DEBUG("Moving %d bytes from GPU to host \n", tensor->size_in_bytes);
tensor->data_placement = HOST;
}
else {
DEBUG("No data movement required - Data on Host \n");
}
}
// If destination is the GPU
else if (destination == 1) {
if (tensor->data_placement != DEVICE) {
cudaMemcpy(tensor->gpu_data, tensor->host_data, tensor->size_in_bytes,
cudaMemcpyHostToDevice);
DEBUG("Moving %d bytes from host to GPU \n", tensor->size_in_bytes);
tensor->data_placement = DEVICE;
}
else {
DEBUG("No data movement required - Data on Device \n");
}
}
}
void convertToFP16(struct Tensor *tensor) {
if (tensor == NULL)
return;
if (tensor->cur_type == half_type)
return;
DEBUG("ConvertoFP16 \n");
setSizeInBytes(tensor, half_type, tensor->num_elems);
size_t size_in_bytes = tensor->size_in_bytes;
DEBUG("size_in_bytes = %d \n", size_in_bytes);
if (tensor->gpu_half_data == NULL)
checkCudaErrors(cudaMalloc(&tensor->gpu_half_data,
size_in_bytes)); // Allocate memory on GPU
// If Tensor is one of Tracked (has to free per batch) then track all data
// types
if (tracked_tensors.find(tensor) != tracked_tensors.end())
tensors_ptr.insert(tensor->gpu_half_data);
f2h((float *)tensor->gpu_data, tensor->num_elems,
(half *)tensor->gpu_half_data);
tensor->cur_type = half_type;
}
void convertToFP32(struct Tensor *tensor) {
if (tensor == NULL)
return;
// Need this check for both offline and online profiling path
if (tensor->cur_type == float_type)
return;
DEBUG("ConvertoFP32 \n");
setSizeInBytes(tensor, float_type, tensor->num_elems);
size_t size_in_bytes = tensor->size_in_bytes;
// If FP32 data array doesn't exist, allocate
if (tensor->gpu_data == NULL) {
checkCudaErrors(
cudaMalloc(&tensor->gpu_data, size_in_bytes)); // Allocate memory on GPU
DEBUG("NOTE: Allocating new FP32 Array with size = %lu \n", size_in_bytes);
}
// If Tensor is one of Tracked (has to free per batch) then track all data
// types
if (tracked_tensors.find(tensor) != tracked_tensors.end())
tensors_ptr.insert(tensor->gpu_data);
h2f((half *)tensor->gpu_half_data, tensor->num_elems,
(float *)tensor->gpu_data);
tensor->cur_type = float_type;
}
void convertToFP32_offline(struct Tensor *tensor) {
if (tensor == NULL)
return;
if (tensor->cur_type == half_type)
return;
DEBUG("ConvertoFP32 \n");
setSizeInBytes(tensor, float_type, tensor->num_elems);
size_t size_in_bytes = tensor->size_in_bytes;
// If FP32 data array doesn't exist, allocate
if (tensor->gpu_data == NULL) {
checkCudaErrors(
cudaMalloc(&tensor->gpu_data, size_in_bytes)); // Allocate memory on GPU
DEBUG("NOTE: Allocating new FP32 Array with size = %lu \n", size_in_bytes);
}
// If Tensor is one of Tracked (has to free per batch) then track all data
// types
if (tracked_tensors.find(tensor) != tracked_tensors.end())
tensors_ptr.insert(tensor->gpu_data);
h2f((half *)tensor->gpu_half_data, tensor->num_elems,
(float *)tensor->gpu_data);
tensor->cur_type = float_type;
cudaFree(tensor->gpu_half_data);
tensors_ptr.erase(tensor->gpu_half_data);
tensor->gpu_half_data = NULL;
}
// Called from within the runtime to change the data placement
// This routine is required to change the output data placements from host to
// device
void changeTensorPlacement(struct Tensor *tensor,
data_location_t data_placement) {
if (tensor == NULL)
ERROR("Tensor == NULL");
tensor->data_placement = data_placement;
}
} // end of Extern"C"
|
a226bbf15ca6d6362bf0e194f70db1c42aa34838.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define THREADS_PER_BLOCK 1024
#define THRESHOLD 67108864
__global__ void encrypt(int n, char *m, char *k, char *c){
int j, i = blockIdx.x*blockDim.x + threadIdx.x;
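// note: the loop below recomputes the same XOR 100 times per element, so the timed
// kernel performs roughly 100x the work of a single encryption pass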
if (i < n){
for(j = 1; j <= 100; j++){
c[i] = m[i] ^ k[i];
}
}
}
int main(){
/* Serial on Host */
FILE *fp_m, *fp_k;
char *m, *k, *c, ch; /* Host copies */
char *d_m, *d_k, *d_c; /* Device copies */
int s_m = 0, s_k = 0, j, i; // the size counters must start at zero before the scans below
float msecPerencrypt, msecTotal = 0.0f;
//printf("setup ...\n");
//get size of files to malloc data
if (!(fp_m = fopen("../../file.txt", "r")))
perror("failed to read message file\n");
while( fscanf(fp_m,"%c",&ch) != EOF ){
s_m++;
}
if (!(fp_k = fopen("../../key.txt", "r")))
perror("failed to read key\n");
while( fscanf(fp_k,"%c",&ch) != EOF ){
s_k++;
}
//printf("mallocs cpu...\n");
//malloc space for m, k, c
if ( !(m = (char *)malloc(sizeof(char)*s_m)) ){
printf("Failed on malloc for m\n");
exit(EXIT_FAILURE);
}
if ( !(k = (char *)malloc(sizeof(char)*s_k)) ){
printf("Failed on malloc for k\n");
exit(EXIT_FAILURE);
}
if ( !(c = (char *)malloc(sizeof(char)*s_m)) ){
printf("Failed on malloc for c\n");
exit(EXIT_FAILURE);
}
/* Alloc space for device copies of m, k, c */
hipError_t error;
printf("mallocs gpu...\n");
error = hipMalloc((void **)&d_m, s_m);
error = hipMalloc((void **)&d_k, s_k);
error = hipMalloc((void **)&d_c, s_m);
fseek(fp_m, 0, 0);
fseek(fp_k, 0, 0);
//read into buffers
printf("read data...\n");
for( j = 0; fscanf(fp_m,"%c",&ch) != EOF; j++ ){
m[j] = ch;
}
for( j = 0; fscanf(fp_k,"%c",&ch) != EOF; j++ ){
k[j] = ch;
}
/* Copy inputs to device */
printf("Copy to device...\n");
hipMemcpy(d_m, m, s_m, hipMemcpyHostToDevice);
hipMemcpy(d_k, k, s_k, hipMemcpyHostToDevice);
printf("Setting up streams...\n");
int sections = s_m/THRESHOLD;
int rem = s_m%THRESHOLD;
hipStream_t stream[sections];
for(i = 0; i < sections; i++){
hipStreamCreate(&stream[i]);
}
printf("moving on...\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
/* Parallel on Device */
/* Launch encrypt() kernel on GPU with N blocks */
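/* note: every launch below is issued on the default stream (the stream argument is 0),
   so the per-iteration hipStreamSynchronize on stream[i] has no queued work to wait for */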
for(i = 0; i < sections-1; i++ ){
hipLaunchKernelGGL(( encrypt), dim3((s_m+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, s_m, d_m, d_k, d_c);
hipStreamSynchronize(stream[i]);
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
//float msecTotal = 0.0f;
/* Copy result back to host */
error = hipMemcpy(c, d_c, s_m, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (c,d_c) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
msecPerencrypt = msecTotal / 1;
printf( "Performance= %.06f sec\n", msecPerencrypt/1000.0 );
//validate for correctness
for (j = 0; j < s_m; j++){
if( c[j] != (m[j]^k[j]) ){
printf("WRONG! c[%d] != m[%d]^k[%d] ==> c='%c',m^k=%c\n", j,j,j,c[j],m[j]^k[j]);
//exit(EXIT_FAILURE);
}
}
// Compute and print the performance
//float msecPerencrypt = msecTotal / 1;
/* Cleanup */
/* Destroy streams */
//for (j = 0; j < sections; j++){
//hipStreamDestroy(stream[j]);
//}
free(m); free(k); free(c);
hipFree(d_m); hipFree(d_k); hipFree(d_c);
fclose(fp_m); fclose(fp_k);
return 0;
}
| a226bbf15ca6d6362bf0e194f70db1c42aa34838.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define THREADS_PER_BLOCK 1024
#define THRESHOLD 67108864
__global__ void encrypt(int n, char *m, char *k, char *c){
int j, i = blockIdx.x*blockDim.x + threadIdx.x;
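// note: the loop below recomputes the same XOR 100 times per element, so the timed
// kernel performs roughly 100x the work of a single encryption pass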
if (i < n){
for(j = 1; j <= 100; j++){
c[i] = m[i] ^ k[i];
}
}
}
int main(){
/* Serial on Host */
FILE *fp_m, *fp_k;
char *m, *k, *c, ch; /* Host copies */
char *d_m, *d_k, *d_c; /* Device copies */
int s_m = 0, s_k = 0, j, i; // the size counters must start at zero before the scans below
float msecPerencrypt, msecTotal = 0.0f;
//printf("setup ...\n");
//get size of files to malloc data
if (!(fp_m = fopen("../../file.txt", "r")))
perror("failed to read message file\n");
while( fscanf(fp_m,"%c",&ch) != EOF ){
s_m++;
}
if (!(fp_k = fopen("../../key.txt", "r")))
perror("failed to read key\n");
while( fscanf(fp_k,"%c",&ch) != EOF ){
s_k++;
}
//printf("mallocs cpu...\n");
//malloc space for m, k, c
if ( !(m = (char *)malloc(sizeof(char)*s_m)) ){
printf("Failed on malloc for m\n");
exit(EXIT_FAILURE);
}
if ( !(k = (char *)malloc(sizeof(char)*s_k)) ){
printf("Failed on malloc for k\n");
exit(EXIT_FAILURE);
}
if ( !(c = (char *)malloc(sizeof(char)*s_m)) ){
printf("Failed on malloc for c\n");
exit(EXIT_FAILURE);
}
/* Alloc space for device copies of m, k, c */
cudaError_t error;
printf("mallocs gpu...\n");
error = cudaMalloc((void **)&d_m, s_m);
error = cudaMalloc((void **)&d_k, s_k);
error = cudaMalloc((void **)&d_c, s_m);
fseek(fp_m, 0, 0);
fseek(fp_k, 0, 0);
//read into buffers
printf("read data...\n");
for( j = 0; fscanf(fp_m,"%c",&ch) != EOF; j++ ){
m[j] = ch;
}
for( j = 0; fscanf(fp_k,"%c",&ch) != EOF; j++ ){
k[j] = ch;
}
/* Copy inputs to device */
printf("Copy to device...\n");
cudaMemcpy(d_m, m, s_m, cudaMemcpyHostToDevice);
cudaMemcpy(d_k, k, s_k, cudaMemcpyHostToDevice);
printf("Setting up streams...\n");
int sections = s_m/THRESHOLD;
int rem = s_m%THRESHOLD;
cudaStream_t stream[sections];
for(i = 0; i < sections; i++){
cudaStreamCreate(&stream[i]);
}
printf("moving on...\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
/* Parallel on Device */
/* Launch encrypt() kernel on GPU with N blocks */
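/* note: every launch below is issued on the default stream (no stream argument is given),
   so the per-iteration cudaStreamSynchronize on stream[i] has no queued work to wait for */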
for(i = 0; i < sections-1; i++ ){
encrypt<<<(s_m+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(s_m, d_m, d_k, d_c);
cudaStreamSynchronize(stream[i]);
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
//float msecTotal = 0.0f;
/* Copy result back to host */
error = cudaMemcpy(c, d_c, s_m, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (c,d_c) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
msecPerencrypt = msecTotal / 1;
printf( "Performance= %.06f sec\n", msecPerencrypt/1000.0 );
//validate for correctness
for (j = 0; j < s_m; j++){
if( c[j] != (m[j]^k[j]) ){
printf("WRONG! c[%d] != m[%d]^k[%d] ==> c='%c',m^k=%c\n", j,j,j,c[j],m[j]^k[j]);
//exit(EXIT_FAILURE);
}
}
// Compute and print the performance
//float msecPerencrypt = msecTotal / 1;
/* Cleanup */
/* Destroy streams */
//for (j = 0; j < sections; j++){
//cudaStreamDestroy(stream[j]);
//}
free(m); free(k); free(c);
cudaFree(d_m); cudaFree(d_k); cudaFree(d_c);
fclose(fp_m); fclose(fp_k);
return 0;
}
|
31e183a84a1ab84f76146007640366eb789070f4.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere
architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4.
Sparse GEMM kernels need to take an additional E matrix which stores the meta data. The format of
meta data is different for every data types. CUTLASS templates can automatically infer it based on
input A and B. Check code below.
Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers
efficiently.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_sparse.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = int32_t; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B
using ElementOutput = int32_t; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
// Data type and layout of meta data matrix E can be inferred from template Gemm.
using ElementInputE = typename Gemm::ElementE;
using LayoutInputE = cutlass::layout::RowMajor;
using ReorderedLayoutInputE = typename Gemm::LayoutE;
// The property below is defined in include/cutlass/arch/sp_mma_sm80.h
// 50% Sparsity on Ampere
constexpr int kSparse = Gemm::kSparse;
// How many elements of A are covered per ElementE
constexpr int kElementsPerElementE = Gemm::kElementsPerElementE;
// The size of individual meta data
constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits;
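// With kSparse == 2 (50% sparsity), tensor A below is allocated with only K/2 columns of
// values, and tensor E carries one metadata element per kElementsPerElementE stored values.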
int run() {
const int length_m = 512;
const int length_n = 512;
const int length_k = 1024;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2)
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed(
problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing.
cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Same size as the above. The above one needs to be reordered and stored in this one.
cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(2),
ElementInputA(-2),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(2),
ElementInputB(-2),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(2),
ElementOutput(-2),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_e.host_view(),
1,
kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core
// instructions.
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_e_reordered.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
tensor_e_reordered.device_ref(), // <- reference to matrix E on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// uncompress tensor_a based on meta data tensor_e. We need it for reference computing.
cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(),
tensor_e.host_ref(), problem_size.m(), problem_size.k());
// Create instantiation for host reference gemm kernel
cutlass::reference::host::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue,
typename Gemm::Operator>
gemm_host;
// Launch host reference gemm kernel
gemm_host(problem_size,
alpha,
tensor_a_uncompressed.host_ref(),
tensor_b.host_ref(),
beta,
tensor_c.host_ref(),
tensor_ref_d.host_ref());
// Copy output data from CUTLASS host for comparison
tensor_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
bool notSupported = false;
// Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.1.
//
// CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
return run();
}
| 31e183a84a1ab84f76146007640366eb789070f4.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere
architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4.
Sparse GEMM kernels need to take an additional E matrix which stores the meta data. The format of
meta data is different for every data types. CUTLASS templates can automatically infer it based on
input A and B. Check code below.
Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers
efficiently.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_sparse.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = int32_t; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B
using ElementOutput = int32_t; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
// Data type and layout of meta data matrix E can be inferred from template Gemm.
using ElementInputE = typename Gemm::ElementE;
using LayoutInputE = cutlass::layout::RowMajor;
using ReorderedLayoutInputE = typename Gemm::LayoutE;
// The properties below are defined in include/cutlass/arch/sp_mma_sm80.h
// (50% sparsity on Ampere).
constexpr int kSparse = Gemm::kSparse;
// How many elements of A are covered per ElementE
constexpr int kElementsPerElementE = Gemm::kElementsPerElementE;
// The size of individual meta data
constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits;
int run() {
const int length_m = 512;
const int length_n = 512;
const int length_k = 1024;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2)
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed(
problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing.
cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Same size as the above. The above one needs to be reordered and stored in this one.
cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(2),
ElementInputA(-2),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(2),
ElementInputB(-2),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(2),
ElementOutput(-2),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_e.host_view(),
1,
kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core
// instructions.
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
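  // Note: only the reordered copy (tensor_e_reordered) is consumed by the device kernel below;
  // the row-major tensor_e is kept around for host-side uncompress() and the reference GEMM.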
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_e_reordered.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
  // Split the K dimension into 1 partition (no split-K)
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
tensor_e_reordered.device_ref(), // <- reference to matrix E on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// uncompress tensor_a based on meta data tensor_e. We need it for reference computing.
cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(),
tensor_e.host_ref(), problem_size.m(), problem_size.k());
// Create instantiation for host reference gemm kernel
cutlass::reference::host::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue,
typename Gemm::Operator>
gemm_host;
// Launch host reference gemm kernel
gemm_host(problem_size,
alpha,
tensor_a_uncompressed.host_ref(),
tensor_b.host_ref(),
beta,
tensor_c.host_ref(),
tensor_ref_d.host_ref());
// Copy output data from CUTLASS host for comparison
tensor_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
bool notSupported = false;
// Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.1.
//
// CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
    // Returning zero so this test passes on older Toolkits. Its action is a no-op.
return 0;
}
return run();
}
|
c8dd03f06dadfb96375198ea62049c76c9ab375d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <Windows.h>
#include <assert.h>
#define N_EQUATIONS (1<<25)
#define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START() { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL( hipEventDestroy( cuda_timer_start ) );
CUDA_CALL( hipEventDestroy( cuda_timer_stop ) );
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
__host__ void cuda_error_check(const char * prefix, const char * postfix)
{
if (hipPeekAtLastError() != hipSuccess)
{
printf("%s%s%s", prefix, hipGetErrorString(hipGetLastError()), postfix);
hipDeviceReset();
//wait_exit();
exit(1);
}
}
#define MAX_N_ELEMENTS (1 << 25)
void generate_random_float_array(float *array, int n) {
int i;
for (i = 0; i < n; i++) {
array[i] = 3.1415926f*((float)rand() / RAND_MAX);
}
}
// gpu code
void find_roots_CPU(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n)
{
int i;
float a, b, c, d, x0, x1, tmp;
for (i = 0; i < n; i++)
{
a = A[i]; b = B[i]; c = C[i];
d = sqrtf(b*b - 4.0f*a*c);
tmp = 1.0f / (2.0f*a);
X0[i] = x0 = (-b - d) * tmp;
X1[i] = x1 = (-b + d) * tmp;
FX0[i] = (a*x0 + b)*x0 + c;
FX1[i] = (a*x1 + b)*x1 + c;
}
}
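/*
  Both the CPU reference above and the GPU kernel below evaluate the closed-form roots of
  a*x^2 + b*x + c = 0, i.e. x = (-b +/- sqrt(b^2 - 4ac)) / (2a), and then re-evaluate the
  polynomial at each root in Horner form, (a*x + b)*x + c, as a residual for later checking.
*/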
__global__ void find_roots_GPU(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1)
{
unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
float a,b,c,d,x0,x1,tmp;
a = A[tid]; b = B[tid], c = C[tid];
d = sqrtf(b*b - 4.0f*a*c);
tmp = 1.0f / (2.0f*a);
X0[tid] = x0 = (-b - d) * tmp;
X1[tid] = x1 = (-b + d) * tmp;
FX0[tid] = (a*x0 + b)*x0 + c;
FX1[tid] = (a*x1 + b)*x1 + c;
}
void main(void) {
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
FILE *fp = fopen( "abc.bin", "rb" );
int n;
fread( &n, sizeof( int ), 1, fp );
float *A = new float[ N_EQUATIONS ];
float *B = new float[ N_EQUATIONS ];
float *C = new float[ N_EQUATIONS ];
float *X0 = new float[ N_EQUATIONS ];
float *X1 = new float[ N_EQUATIONS ];
float *FX0 = new float[ N_EQUATIONS ];
float *FX1 = new float[ N_EQUATIONS ];
float *CPU_X0 = new float[ N_EQUATIONS ];
float *CPU_X1 = new float[ N_EQUATIONS ];
float *CPU_FX0 = new float[ N_EQUATIONS ];
float *CPU_FX1 = new float[ N_EQUATIONS ];
fread( A, sizeof( float ), N_EQUATIONS, fp );
fread( B, sizeof( float ), N_EQUATIONS, fp );
fread( C, sizeof( float ), N_EQUATIONS, fp );
fclose(fp);
printf( "*** CPU Works...\n" );
CHECK_TIME_START();
find_roots_CPU(A,B,C,CPU_X0,CPU_X1,CPU_FX0,CPU_FX1,n);
CHECK_TIME_END( compute_time );
printf( " - Finish\n\n" );
CUDA_CALL(hipSetDevice(0));
float *cudaA,*cudaB,*cudaC,*cudaX0,*cudaX1,*cudaFX0,*cudaFX1;
CUDA_CALL(hipMalloc(&cudaA, sizeof(float) * N_EQUATIONS));
CUDA_CALL(hipMalloc(&cudaB, sizeof(float) * N_EQUATIONS));
CUDA_CALL(hipMalloc(&cudaC, sizeof(float) * N_EQUATIONS));
CUDA_CALL(hipMalloc(&cudaX0, sizeof(float) * N_EQUATIONS));
CUDA_CALL(hipMalloc(&cudaX1, sizeof(float) * N_EQUATIONS));
CUDA_CALL(hipMalloc(&cudaFX0, sizeof(float) * N_EQUATIONS));
CUDA_CALL(hipMalloc(&cudaFX1, sizeof(float) * N_EQUATIONS));
printf("*** Copying A and B and C from host to device...\n");
CUDA_CALL(hipMemcpy(cudaA, A, sizeof(float) * N_EQUATIONS, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(cudaB, B, sizeof(float) * N_EQUATIONS, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(cudaC, C, sizeof(float) * N_EQUATIONS, hipMemcpyHostToDevice));
printf(" - Finish\n\n");
CHECK_TIME_INIT_GPU();
size_t n_threads = (1<<10);
size_t n_blocks = n / n_threads;
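	// Note: integer division; find_roots_GPU has no tid < n bounds check, so this launch covers
	// exactly n_blocks * n_threads equations and assumes n is a multiple of n_threads (1 << 10).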
printf("*** kernel call: Get X0 X1 FX0 FX1 <<< %d, %d >>>()...\n", n_blocks, n_threads);
CHECK_TIME_START_GPU();
hipLaunchKernelGGL(( find_roots_GPU), dim3(n_blocks), dim3(n_threads) , 0, 0, cudaA,cudaB,cudaC,cudaX0,cudaX1,cudaFX0,cudaFX1);
cuda_error_check("- ", " FAILED: X0 X1 FX0 FX()\n\n");
CHECK_TIME_END_GPU(device_time);
printf( " - Finish\n\n" );
printf("*** Time taken = %.6fms(CPU), %.6fms(GPU)\n", compute_time, device_time);
printf("*** Copying Y from device to host...\n");
CUDA_CALL(hipMemcpy(X0, cudaX0, sizeof(float) * N_EQUATIONS, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(X1, cudaX1, sizeof(float) * N_EQUATIONS, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(FX0, cudaFX0, sizeof(float) * N_EQUATIONS, hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(FX1, cudaFX1, sizeof(float) * N_EQUATIONS, hipMemcpyDeviceToHost));
CUDA_CALL( hipDeviceSynchronize() );
/* */
int cnt = 0;
for( int i = 0; i < N_EQUATIONS ; ++i )
{
if( fabs(X0[ i ] - CPU_X0[ i ]) > 0.00001 )
cnt++;
}
printf( "X0 - %.2f%% numerical errors...\n", cnt/(float)(N_EQUATIONS)*100 );
cnt = 0;
for( int i = 0; i < N_EQUATIONS ; ++i )
{
if(fabs(X1[ i ] - CPU_X1[ i ]) > 0.00001 )
cnt++;
}
printf( "X1 - %.2f%% numerical errors...\n", cnt/(float)(N_EQUATIONS)*100 );
cnt = 0;
for( int i = 0; i < N_EQUATIONS ; ++i )
{
if( fabs(FX0[ i ] - CPU_FX0[ i ]) > 0.00001 )
cnt++;
}
printf( "FX0 - %.2f%% numerical errors...\n", cnt/(float)(N_EQUATIONS)*100 );
cnt = 0;
for( int i = 0; i < N_EQUATIONS ; ++i )
{
if( fabs(FX1[ i ] - CPU_FX1[ i ]) > 0.00001 )
cnt++;
}
printf("FX1 - %.2f%% numerical errors...\n", cnt/(float)(N_EQUATIONS)*100 );
printf(" - Finish\n\n");
FILE *fp1 = fopen( "X0.bin", "wb" );
fwrite( X0, sizeof( float ), N_EQUATIONS, fp1);
fclose(fp1);
FILE *fp2 = fopen( "X1.bin", "wb" );
fwrite( X1, sizeof( float ), N_EQUATIONS, fp2);
fclose(fp2);
FILE *fp3 = fopen( "FX0.bin", "wb" );
fwrite( FX0, sizeof( float ), N_EQUATIONS, fp3);
fclose(fp3);
FILE *fp4 = fopen( "FX1.bin", "wb" );
fwrite( FX1, sizeof( float ), N_EQUATIONS, fp4);
fclose(fp4);
hipFree(cudaA);
hipFree(cudaB);
hipFree(cudaC);
hipFree(cudaX0);
hipFree(cudaX1);
hipFree(cudaFX0);
hipFree(cudaFX1);
CHECK_TIME_DEST_GPU();
CUDA_CALL(hipDeviceReset());
delete[] A;
delete[] B;
delete[] C;
delete[] X0;
delete[] X1;
delete[] FX0;
delete[] FX1;
} | c8dd03f06dadfb96375198ea62049c76c9ab375d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <Windows.h>
#include <assert.h>
#define N_EQUATIONS (1<<25)
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START() { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL( cudaEventDestroy( cuda_timer_start ) );
CUDA_CALL( cudaEventDestroy( cuda_timer_stop ) );
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
__host__ void cuda_error_check(const char * prefix, const char * postfix)
{
if (cudaPeekAtLastError() != cudaSuccess)
{
printf("%s%s%s", prefix, cudaGetErrorString(cudaGetLastError()), postfix);
cudaDeviceReset();
//wait_exit();
exit(1);
}
}
#define MAX_N_ELEMENTS (1 << 25)
void generate_random_float_array(float *array, int n) {
int i;
for (i = 0; i < n; i++) {
array[i] = 3.1415926f*((float)rand() / RAND_MAX);
}
}
// gpu code
void find_roots_CPU(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n)
{
int i;
float a, b, c, d, x0, x1, tmp;
for (i = 0; i < n; i++)
{
a = A[i]; b = B[i]; c = C[i];
d = sqrtf(b*b - 4.0f*a*c);
tmp = 1.0f / (2.0f*a);
X0[i] = x0 = (-b - d) * tmp;
X1[i] = x1 = (-b + d) * tmp;
FX0[i] = (a*x0 + b)*x0 + c;
FX1[i] = (a*x1 + b)*x1 + c;
}
}
__global__ void find_roots_GPU(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1)
{
unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
float a,b,c,d,x0,x1,tmp;
a = A[tid]; b = B[tid], c = C[tid];
d = sqrtf(b*b - 4.0f*a*c);
tmp = 1.0f / (2.0f*a);
X0[tid] = x0 = (-b - d) * tmp;
X1[tid] = x1 = (-b + d) * tmp;
FX0[tid] = (a*x0 + b)*x0 + c;
FX1[tid] = (a*x1 + b)*x1 + c;
}
void main(void) {
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
FILE *fp = fopen( "abc.bin", "rb" );
int n;
fread( &n, sizeof( int ), 1, fp );
float *A = new float[ N_EQUATIONS ];
float *B = new float[ N_EQUATIONS ];
float *C = new float[ N_EQUATIONS ];
float *X0 = new float[ N_EQUATIONS ];
float *X1 = new float[ N_EQUATIONS ];
float *FX0 = new float[ N_EQUATIONS ];
float *FX1 = new float[ N_EQUATIONS ];
float *CPU_X0 = new float[ N_EQUATIONS ];
float *CPU_X1 = new float[ N_EQUATIONS ];
float *CPU_FX0 = new float[ N_EQUATIONS ];
float *CPU_FX1 = new float[ N_EQUATIONS ];
fread( A, sizeof( float ), N_EQUATIONS, fp );
fread( B, sizeof( float ), N_EQUATIONS, fp );
fread( C, sizeof( float ), N_EQUATIONS, fp );
fclose(fp);
printf( "*** CPU Works...\n" );
CHECK_TIME_START();
find_roots_CPU(A,B,C,CPU_X0,CPU_X1,CPU_FX0,CPU_FX1,n);
CHECK_TIME_END( compute_time );
printf( " - Finish\n\n" );
CUDA_CALL(cudaSetDevice(0));
float *cudaA,*cudaB,*cudaC,*cudaX0,*cudaX1,*cudaFX0,*cudaFX1;
CUDA_CALL(cudaMalloc(&cudaA, sizeof(float) * N_EQUATIONS));
CUDA_CALL(cudaMalloc(&cudaB, sizeof(float) * N_EQUATIONS));
CUDA_CALL(cudaMalloc(&cudaC, sizeof(float) * N_EQUATIONS));
CUDA_CALL(cudaMalloc(&cudaX0, sizeof(float) * N_EQUATIONS));
CUDA_CALL(cudaMalloc(&cudaX1, sizeof(float) * N_EQUATIONS));
CUDA_CALL(cudaMalloc(&cudaFX0, sizeof(float) * N_EQUATIONS));
CUDA_CALL(cudaMalloc(&cudaFX1, sizeof(float) * N_EQUATIONS));
printf("*** Copying A and B and C from host to device...\n");
CUDA_CALL(cudaMemcpy(cudaA, A, sizeof(float) * N_EQUATIONS, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(cudaB, B, sizeof(float) * N_EQUATIONS, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(cudaC, C, sizeof(float) * N_EQUATIONS, cudaMemcpyHostToDevice));
printf(" - Finish\n\n");
CHECK_TIME_INIT_GPU();
size_t n_threads = (1<<10);
size_t n_blocks = n / n_threads;
printf("*** kernel call: Get X0 X1 FX0 FX1 <<< %d, %d >>>()...\n", n_blocks, n_threads);
CHECK_TIME_START_GPU();
find_roots_GPU<<<n_blocks, n_threads >>>(cudaA,cudaB,cudaC,cudaX0,cudaX1,cudaFX0,cudaFX1);
cuda_error_check("- ", " FAILED: X0 X1 FX0 FX()\n\n");
CHECK_TIME_END_GPU(device_time);
printf( " - Finish\n\n" );
printf("*** Time taken = %.6fms(CPU), %.6fms(GPU)\n", compute_time, device_time);
printf("*** Copying Y from device to host...\n");
CUDA_CALL(cudaMemcpy(X0, cudaX0, sizeof(float) * N_EQUATIONS, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(X1, cudaX1, sizeof(float) * N_EQUATIONS, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(FX0, cudaFX0, sizeof(float) * N_EQUATIONS, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(FX1, cudaFX1, sizeof(float) * N_EQUATIONS, cudaMemcpyDeviceToHost));
CUDA_CALL( cudaDeviceSynchronize() );
	/* verification */
int cnt = 0;
for( int i = 0; i < N_EQUATIONS ; ++i )
{
if( fabs(X0[ i ] - CPU_X0[ i ]) > 0.00001 )
cnt++;
}
printf( "X0 - %.2f%% numerical errors...\n", cnt/(float)(N_EQUATIONS)*100 );
cnt = 0;
for( int i = 0; i < N_EQUATIONS ; ++i )
{
if(fabs(X1[ i ] - CPU_X1[ i ]) > 0.00001 )
cnt++;
}
printf( "X1 - %.2f%% numerical errors...\n", cnt/(float)(N_EQUATIONS)*100 );
cnt = 0;
for( int i = 0; i < N_EQUATIONS ; ++i )
{
if( fabs(FX0[ i ] - CPU_FX0[ i ]) > 0.00001 )
cnt++;
}
printf( "FX0 - %.2f%% numerical errors...\n", cnt/(float)(N_EQUATIONS)*100 );
cnt = 0;
for( int i = 0; i < N_EQUATIONS ; ++i )
{
if( fabs(FX1[ i ] - CPU_FX1[ i ]) > 0.00001 )
cnt++;
}
printf("FX1 - %.2f%% numerical errors...\n", cnt/(float)(N_EQUATIONS)*100 );
printf(" - Finish\n\n");
FILE *fp1 = fopen( "X0.bin", "wb" );
fwrite( X0, sizeof( float ), N_EQUATIONS, fp1);
fclose(fp1);
FILE *fp2 = fopen( "X1.bin", "wb" );
fwrite( X1, sizeof( float ), N_EQUATIONS, fp2);
fclose(fp2);
FILE *fp3 = fopen( "FX0.bin", "wb" );
fwrite( FX0, sizeof( float ), N_EQUATIONS, fp3);
fclose(fp3);
FILE *fp4 = fopen( "FX1.bin", "wb" );
fwrite( FX1, sizeof( float ), N_EQUATIONS, fp4);
fclose(fp4);
cudaFree(cudaA);
cudaFree(cudaB);
cudaFree(cudaC);
cudaFree(cudaX0);
cudaFree(cudaX1);
cudaFree(cudaFX0);
cudaFree(cudaFX1);
CHECK_TIME_DEST_GPU();
CUDA_CALL(cudaDeviceReset());
delete[] A;
delete[] B;
delete[] C;
delete[] X0;
delete[] X1;
delete[] FX0;
delete[] FX1;
} |
b52063c97a707ed4c2618bd3352aa2113b5c6d70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <objects/point/points.cuh>
#include <objects/point/kernels.cu>
#include <core/ImguiConfig.hpp>
void Points::init()
{
isPing = true;
//Generating buffers
glGenBuffers (BUFFER_COUNT, BUFFER);
std::cout<<glGetError()<<std::endl;
//init buffers
uint *tempUint = new uint[MAX_NUM_POINTS];
for (uint i = BUFFER_INDICES; i<= BUFFER_CELLID_PONG; i++)
{
glBindBuffer (GL_ARRAY_BUFFER, BUFFER[i]);
glBufferData (GL_ARRAY_BUFFER, MAX_NUM_POINTS * sizeof(uint), tempUint, GL_DYNAMIC_DRAW);
}
    delete[] tempUint;
float *tempFloat = new float[MAX_NUM_POINTS*3];
for (uint i=BUFFER_POSITIONS_PING; i<=BUFFER_NORMALS; i++)
{
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[i]);
glBufferData (GL_ARRAY_BUFFER, MAX_NUM_POINTS * sizeof(float) * 3, tempFloat, GL_DYNAMIC_DRAW);
}
    delete[] tempFloat;
for (uint i=BUFFER_DENSITIES_PING; i<=BUFFER_FREEZEPOINT; i++)
{
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[i]);
glBufferData (GL_ARRAY_BUFFER, MAX_NUM_POINTS * sizeof(float) , 0, GL_DYNAMIC_DRAW);
}
std::cout<<glGetError()<<std::endl;
//Setting Ping VAO
glGenVertexArrays (1, &pingVAO);
glBindVertexArray (pingVAO);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_POSITIONS_PING]);
glEnableVertexAttribArray (0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_TEMP_PING]);
glEnableVertexAttribArray (1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_COLORGRAD_PING]);
glEnableVertexAttribArray (2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_DENSITIES_PING]);
glEnableVertexAttribArray (3);
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, 0, NULL);
//Setting Pong VAO;
glGenVertexArrays (1, &pongVAO);
glBindVertexArray (pongVAO);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_POSITIONS_PONG]);
glEnableVertexAttribArray (0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_TEMP_PONG]);
glEnableVertexAttribArray (1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_COLORGRAD_PONG]);
glEnableVertexAttribArray (2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_DENSITIES_PONG]);
glEnableVertexAttribArray (3);
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, 0, NULL);
//load temperature texture
loadTempText();
//Register CUDA resources
for (int i=0; i<BUFFER_COUNT; i++)
gpuErrchk( hipGraphicsGLRegisterBuffer(&(resources[i]), BUFFER[i], hipGraphicsRegisterFlagsWriteDiscard));
//allocate space for constants
//Allocate and init grid
gpuErrchk(hipMalloc((void**)&(grid.gridRes), sizeof(uint3)));
gpuErrchk(hipMalloc((void**)&(grid.startPoint), sizeof(float3)));
gpuErrchk(hipMalloc((void**)&(grid.cellSize), sizeof(float3)));
grid.gridRes = make_uint3(16, 16, 16);
grid.startPoint = make_float3(-15, -15, -15);
grid.cellSize = make_float3( -grid.startPoint.x*2/grid.gridRes.x, -grid.startPoint.y*2/grid.gridRes.y, -grid.startPoint.z*2/grid.gridRes.z);
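    // Note: the three hipMalloc calls above write device pointers into gridRes/startPoint/cellSize,
    // but the value assignments here immediately overwrite them, so those allocations appear to be
    // leaked no-ops; gridRes, startPoint and cellSize behave as plain host-side values.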
gpuErrchk(hipMalloc((void**)&(grid.count), sizeof(uint)*grid.gridRes.x*grid.gridRes.y*grid.gridRes.z));
gpuErrchk(hipMalloc((void**)&(grid.firstIdx), sizeof(uint)*grid.gridRes.x*grid.gridRes.y*grid.gridRes.z));
gpuErrchk(hipMalloc((void**)&dSumArray, sizeof(uint)
*(grid.gridRes.x * grid.gridRes.y * grid.gridRes.z -1)/2048+1)); //dead param
gpuErrchk(hipMalloc((void**)&dDensSum, sizeof(float)));
gpuErrchk(hipMalloc((void**)&dDensCount, sizeof(int)));
gpuErrchk(hipMalloc((void**)&tempPos, sizeof(float)*3*MAX_NUM_POINTS));
}
void Points::setValues ( const GLfloat *val, const GLuint &num, const GLuint &size, const int &bufferIdx)
{
numPoints = num;
gpuErrchk(hipMemcpy(dNumPoints, &numPoints, sizeof(uint), hipMemcpyHostToDevice));
if ( numPoints > MAX_NUM_POINTS)
exit(-1);
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[bufferIdx]);
glBufferSubData(GL_ARRAY_BUFFER, 0, num*size*sizeof(GLfloat), val);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void Points::addPars( const GLfloat *pos, const GLfloat *temp, const GLfloat *velos, const GLuint &num, const GLuint *s)
{
if ( numPoints+num >= MAX_NUM_POINTS)
{
std::cout<<"To many particles\n";
exit(-1);
}
int BUFFER_POSITIONS = 0;
int BUFFER_TEMP = 0;
int BUFFER_STATES = 0;
int BUFFER_VELOCITIES = 0;
if (isPing)
{
BUFFER_POSITIONS = BUFFER_POSITIONS_PING;
BUFFER_TEMP = BUFFER_TEMP_PING;
BUFFER_STATES = BUFFER_STATES_PING;
BUFFER_VELOCITIES = BUFFER_VELOCITIES_PING;
}
else
{
BUFFER_POSITIONS = BUFFER_POSITIONS_PONG;
BUFFER_TEMP = BUFFER_TEMP_PONG;
BUFFER_STATES = BUFFER_STATES_PONG;
BUFFER_VELOCITIES = BUFFER_VELOCITIES_PONG;
}
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[BUFFER_POSITIONS]);
glBufferSubData(GL_ARRAY_BUFFER, 3*numPoints*sizeof(GLfloat), 3*num*sizeof(GLfloat), pos);
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[BUFFER_TEMP]);
glBufferSubData(GL_ARRAY_BUFFER, numPoints*sizeof(GLfloat), num*sizeof(GLfloat), temp);
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[BUFFER_STATES]);
glBufferSubData(GL_ARRAY_BUFFER, numPoints*sizeof(GLuint), num*sizeof(GLuint), s);
//set velo to zero
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[BUFFER_VELOCITIES]);
glBufferSubData(GL_ARRAY_BUFFER, 3*numPoints*sizeof(GLfloat), 3*num*sizeof(GLfloat), velos);
glBindBuffer(GL_ARRAY_BUFFER, 0);
numPoints += num;
}
void Points::update()
{
runKernels();
float GPUsum;
int GPUcount;
gpuErrchk(hipMemcpy(&GPUsum, dDensSum, sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(&GPUcount, dDensCount, sizeof(int), hipMemcpyDeviceToHost));
surfaceDens = GPUsum / GPUcount;
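    // Note: in this runKernels() path nothing resets or accumulates into dDensSum / dDensCount
    // (the kernel that fills them is commented out below), so surfaceDens is presumably not
    // meaningful in this build.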
}
void Points::draw()
{
if (isPing)
glBindVertexArray(pingVAO);
else
glBindVertexArray(pongVAO);
glDrawArrays( GL_POINTS, 0, numPoints);
glBindVertexArray(0);
isPing = !isPing; //swap buffer
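    // Note: the ping/pong flag flips only here, after rendering, so the pointer swap at the top of
    // runKernels() stays consistent only as long as update() and draw() alternate once per frame.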
}
void Points::loadTempText() //load texture with il
{
ILuint imageID; // Create an image ID as a ULuint
ilInit();
GLuint textureID; // Create a texture ID as a GLuint
ILboolean success; // Create a flag to keep track of success/failure
ILenum error; // Create a flag to keep track of the IL error state
ilGenImages(1, &imageID); // Generate the image ID
ilBindImage(imageID); // Bind the image
success = ilLoadImage("./assets/temperature.png"); // Load the image file
// If we managed to load the image, then we can start to do things with it...
if (success)
{
// If the image is flipped (i.e. upside-down and mirrored, flip it the right way up!)
ILinfo ImageInfo;
iluGetImageInfo(&ImageInfo);
if (ImageInfo.Origin == IL_ORIGIN_UPPER_LEFT)
{
iluFlipImage();
}
// Convert the image into a suitable format to work with
// NOTE: If your image contains alpha channel you can replace IL_RGB with IL_RGBA
success = ilConvertImage(IL_RGB, IL_UNSIGNED_BYTE);
// Quit out if we failed the conversion
if (!success)
{
error = ilGetError();
std::cout << "Image conversion failed - IL reports error: " << error << " - " << iluErrorString(error) << std::endl;
exit(-1);
}
// Generate a new texture
glGenTextures(1, &textureID);
// Bind the texture to a name
glBindTexture(GL_TEXTURE_2D, textureID);
// Set texture clamping method
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
// Set texture interpolation method to use linear interpolation (no MIPMAPS)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Specify the texture specification
glTexImage2D(GL_TEXTURE_2D, // Type of texture
0, // Pyramid level (for mip-mapping) - 0 is the top level
ilGetInteger(IL_IMAGE_FORMAT), // Internal pixel format to use. Can be a generic type like GL_RGB or GL_RGBA, or a sized type
ilGetInteger(IL_IMAGE_WIDTH), // Image width
ilGetInteger(IL_IMAGE_HEIGHT), // Image height
0, // Border width in pixels (can either be 1 or 0)
ilGetInteger(IL_IMAGE_FORMAT), // Format of image pixel data
GL_UNSIGNED_BYTE, // Image data type
ilGetData()); // The actual image data itself
}
else // If we failed to open the image file in the first place...
{
error = ilGetError();
std::cout << "Image load failed - IL reports error: " << error << " - " << iluErrorString(error) << std::endl;
exit(-1);
}
ilDeleteImages(1, &imageID); // Because we have already copied image data into texture data we can release memory used by image.
std::cout << "Texture creation successful." << std::endl;
tex1 = textureID; // Return the GLuint to the texture so you can use it!
}
void Points::runKernelsPCISPH()
{
hipMemset(dDensSum, 0, sizeof(float));
hipMemset(dDensCount, 0, sizeof(int));
if (numPoints == 0)
return;
//get pointers
for (int i=0; i<BUFFER_COUNT; i++)
{
gpuErrchk(hipGraphicsMapResources(1, &(resources[i])));
gpuErrchk(hipGraphicsResourceGetMappedPointer( &(dPointers[i]), &(numBytes[i]), resources[i]));
}
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
if (!isPing)
{
//swap ping pong pointers
Points::swapPtr(dPointers[BUFFER_STATES_PING], dPointers[BUFFER_STATES_PONG]);
Points::swapPtr(dPointers[BUFFER_CELLID_PING], dPointers[BUFFER_CELLID_PONG]);
Points::swapPtr(dPointers[BUFFER_POSITIONS_PING], dPointers[BUFFER_POSITIONS_PONG]);
Points::swapPtr(dPointers[BUFFER_VELOCITIES_PING], dPointers[BUFFER_VELOCITIES_PONG]);
Points::swapPtr(dPointers[BUFFER_TEMP_PING], dPointers[BUFFER_TEMP_PONG]);
Points::swapPtr(dPointers[BUFFER_COLORGRAD_PING], dPointers[BUFFER_COLORGRAD_PONG]);
Points::swapPtr(dPointers[BUFFER_DENSITIES_PING], dPointers[BUFFER_DENSITIES_PONG]);
}
hipLaunchKernelGGL(( resetGrid), dim3(16*16), dim3(16), 0, 0,
grid.count,
grid.gridRes);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk(hipMemcpy(dNumPoints, &numPoints, sizeof(uint), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( insertPars), dim3(max((numPoints-1)/1024+1, 1)), dim3(1024), 0, 0,
(float3*)dPointers[BUFFER_POSITIONS_PING],
(uint*)dPointers[BUFFER_CELLID_PING],
dNumPoints,
(uint*)dPointers[BUFFER_OFFSET],
grid.startPoint,
grid.cellSize,
grid.gridRes,
grid.count,
numPoints);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
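    // Note: the two fixed <<<2, 1024>>> scan launches below are sized for the 16x16x16 = 4096-cell
    // grid (presumably two cells per thread); they would need adjusting if grid.gridRes changes.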
hipLaunchKernelGGL(( prefixSumKernel), dim3(2),dim3(1024), 0, 0, grid.count,grid.firstIdx,(uint*)dSumArray, grid.gridRes.x*grid.gridRes.y*grid.gridRes.z);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefexSumPostShort), dim3(2), dim3(1024), 0, 0, grid.firstIdx, (uint*)dSumArray, 2);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( countingSort), dim3(max((numPoints-1)/1024+1, 1)), dim3(1024), 0, 0,
grid.firstIdx,
(uint*)dPointers[BUFFER_OFFSET],
(uint*)dPointers[BUFFER_CELLID_PING],
(uint*)dPointers[BUFFER_CELLID_PONG],
grid.count,
(float3*)dPointers[BUFFER_VELOCITIES_PING],
(float3*)dPointers[BUFFER_VELOCITIES_PONG],
(float3*)dPointers[BUFFER_POSITIONS_PING],
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(States*)dPointers[BUFFER_STATES_PING],
(States*)dPointers[BUFFER_STATES_PONG],
(float*)dPointers[BUFFER_TEMP_PING],
(float*)dPointers[BUFFER_TEMP_PONG],
numPoints);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk(hipMemcpy(&numPoints, dNumPoints, sizeof(uint), hipMemcpyDeviceToHost));
if (numPoints != 0)
{
hipLaunchKernelGGL(( calculateDensitiesGlobal), dim3(max((numPoints-1)/1024+1, 1)), dim3(1024), 0, 0,
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(float*)dPointers[BUFFER_DENSITIES_PONG],
(uint*)dPointers[BUFFER_CELLID_PONG],
grid.firstIdx,
grid.count,
numPoints);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
//compute external forces
}
for (int i=0; i<BUFFER_COUNT; i++)
gpuErrchk(hipGraphicsUnmapResources(1, &(resources[i])));
}
void Points::runKernels()
{
if (numPoints == 0)
return;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//get pointers
for (int i=0; i<BUFFER_COUNT; i++)
{
gpuErrchk(hipGraphicsMapResources(1, &(resources[i])));
gpuErrchk(hipGraphicsResourceGetMappedPointer( &(dPointers[i]), &(numBytes[i]), resources[i]));
}
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Num of Points\t %d", numPoints);
ImGui::Text("Mapping\t %f",elapsedTime);
/*ping:
ping -sort-> pong -> render
pong
pong -sort-> ping -> render
*/
if (!isPing)
{
//swap ping pong pointers
Points::swapPtr(dPointers[BUFFER_STATES_PING], dPointers[BUFFER_STATES_PONG]);
Points::swapPtr(dPointers[BUFFER_CELLID_PING], dPointers[BUFFER_CELLID_PONG]);
Points::swapPtr(dPointers[BUFFER_POSITIONS_PING], dPointers[BUFFER_POSITIONS_PONG]);
Points::swapPtr(dPointers[BUFFER_VELOCITIES_PING], dPointers[BUFFER_VELOCITIES_PONG]);
Points::swapPtr(dPointers[BUFFER_TEMP_PING], dPointers[BUFFER_TEMP_PONG]);
Points::swapPtr(dPointers[BUFFER_COLORGRAD_PING], dPointers[BUFFER_COLORGRAD_PONG]);
Points::swapPtr(dPointers[BUFFER_DENSITIES_PING], dPointers[BUFFER_DENSITIES_PONG]);
}
hipEventRecord(start, 0);
hipLaunchKernelGGL(( resetGrid), dim3(16*16), dim3(16), 0, 0,
grid.count,
grid.gridRes);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Reset Grid\t %f",elapsedTime);
//insert particles and count
//std::cout<<"Inserting particles...\n";
gpuErrchk(hipMemcpy(dNumPoints, &numPoints, sizeof(uint), hipMemcpyHostToDevice));
hipEventRecord(start, 0);
hipLaunchKernelGGL(( insertPars), dim3(max((numPoints-1)/1024+1, 1)), dim3(1024), 0, 0,
(float3*)dPointers[BUFFER_POSITIONS_PING],
(uint*)dPointers[BUFFER_CELLID_PING],
dNumPoints,
(uint*)dPointers[BUFFER_OFFSET],
grid.startPoint,
grid.cellSize,
grid.gridRes,
grid.count,
numPoints);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Insert Pars\t %f",elapsedTime);
//do prefix sum on particles
hipEventRecord(start, 0);
hipLaunchKernelGGL(( prefixSumKernel), dim3(2),dim3(1024), 0, 0, grid.count,grid.firstIdx,(uint*)dSumArray, grid.gridRes.x*grid.gridRes.y*grid.gridRes.z);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefexSumPostShort), dim3(2), dim3(1024), 0, 0, grid.firstIdx, (uint*)dSumArray, 2);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
//std::cout<<"counting sort...\n";
hipLaunchKernelGGL(( countingSort), dim3(max((numPoints-1)/1024+1, 1)), dim3(1024), 0, 0,
grid.firstIdx,
(uint*)dPointers[BUFFER_OFFSET],
(uint*)dPointers[BUFFER_CELLID_PING],
(uint*)dPointers[BUFFER_CELLID_PONG],
grid.count,
(float3*)dPointers[BUFFER_VELOCITIES_PING],
(float3*)dPointers[BUFFER_VELOCITIES_PONG],
(float3*)dPointers[BUFFER_POSITIONS_PING],
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(States*)dPointers[BUFFER_STATES_PING],
(States*)dPointers[BUFFER_STATES_PONG],
(float*)dPointers[BUFFER_TEMP_PING],
(float*)dPointers[BUFFER_TEMP_PONG],
numPoints);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Counting sort\t %f",elapsedTime);
//std::cout<<"Copying data back...\n";
gpuErrchk(hipMemcpy(&numPoints, dNumPoints, sizeof(uint), hipMemcpyDeviceToHost));
if (numPoints != 0)
{
//===============++Densities++=====================
//std::cout<<"Calculating densities... numOfPar: "<<max((numPoints-1)/1024+1, 1)<<std::endl;;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( calculateDensitiesGlobal), dim3(max((numPoints-1)/1024+1, 1)), dim3(1024), 0, 0,
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(float*)dPointers[BUFFER_DENSITIES_PONG],
(uint*)dPointers[BUFFER_CELLID_PONG],
grid.firstIdx,
grid.count,
numPoints);
//calculateDensitiesShared<<<16*16*16, 100>>>(
// (float3*)dPointers[BUFFER_POSITIONS_PONG],
// (float*)dPointers[BUFFER_DENSITIES],
// (uint*)dPointers[BUFFER_CELLID_PONG],
// grid.firstIdx,
// grid.count,
// numPoints);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Densities\t %f",elapsedTime);
//Color grad
//hipEventRecord(start, 0);
// computeColorGradsGlobal<<<max((numPoints-1)/1024+1, 1), 1024>>>(
// (float3*)dPointers[BUFFER_POSITIONS_PONG],
// (float*)dPointers[BUFFER_DENSITIES_PONG],
// (float3*)dPointers[BUFFER_COLORGRAD_PONG],
// (float*)dPointers[BUFFER_TEMP_PONG],
// (float*)dDensSum,
// (uint*)dDensCount,
// (uint*)dPointers[BUFFER_STATES_PONG],
// (uint*)dPointers[BUFFER_CELLID_PONG],
// grid.firstIdx,
// grid.count,
// numPoints
// );
// gpuErrchk( hipPeekAtLastError() );
// gpuErrchk( hipDeviceSynchronize() );
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&elapsedTime, start, stop);
//ImGui::Text("Color Grad\t %f",elapsedTime);
//=============++Forces++=======================
//std::cout<<"Computing forces...\n";
#define FORCE_BLOCK_DIM 512
hipEventRecord(start, 0);
hipLaunchKernelGGL(( calculateForcesGlobal), dim3(max((numPoints-1)/FORCE_BLOCK_DIM+1,1)), dim3(FORCE_BLOCK_DIM), 0, 0,
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(float*)dPointers[BUFFER_DENSITIES_PONG],
(float3*)dPointers[BUFFER_VELOCITIES_PONG],
(float3*)dPointers[BUFFER_PRESSURES],
(float3*)dPointers[BUFFER_COLORGRAD_PONG],
(uint*)dPointers[BUFFER_CELLID_PONG],
(uint*)dPointers[BUFFER_STATES_PONG],
(uint*)grid.firstIdx,
(uint*)grid.count,
(float3*)dPointers[BUFFER_NORMALS],
numPoints);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Forces\t %f",elapsedTime);
}
//Unmap pointers;
for (int i=0; i<BUFFER_COUNT; i++)
gpuErrchk(hipGraphicsUnmapResources(1, &(resources[i])));
}
void Points::insertParsFromOBJ(std::string fileName, float temperature, GLuint state)
{
std::cout<<"Inserting OBJ "<<fileName<<std::endl;
std::vector<GLfloat> vertices;
std::vector<GLfloat> vals;
std::vector<GLuint> states;
std::vector<GLfloat> velos;
std::string str;
std::ifstream inFile(fileName);
while(std::getline(inFile, str))
{
splitstring split(str);
vector<std::string> splited = split.split(' ');
if (splited.size() > 0)
if (splited[0] == "v")
for (int i=1; i<splited.size(); i++)
vertices.push_back(std::stof(splited[i]));
}
for (auto i=0; i<vertices.size()/3; i++)
{
vals.push_back(temperature);
states.push_back(state);
velos.push_back(0.0);
velos.push_back(0.0);
velos.push_back(0.0);
}
std::cout<<states.size()<<std::endl;;
std::cout<<"Num vertices: "<<vertices.size()<<std::endl;
addPars(&vertices[0], &vals[0], &velos[0], states.size(), &states[0]);
}
void Points::insertBowl(const glm::vec3 ¢er, const glm::vec3 &dir)
{
const float radius = 1.0;
const uint X=16;
const uint Y=16;
const uint Z=16;
const float step = 10.0/32.0;
std::vector<GLfloat> pos;
std::vector<GLfloat> vals;
std::vector<GLuint> states;
std::vector<GLfloat> velos;
pos.reserve(X*Y*Z*3);
vals.reserve(X*Y*Z);
states.reserve(X*Y*Z);
velos.reserve(X*Y*Z*3);
GLuint numPar = 0;
for (uint i=0; i<X; i++)
for (uint j=0; j<Y; j++)
for (uint k=0; k<Z; k++)
{
float r = (step*i-2.5)*(step*i-2.5)+
(step*j-2.5)*(step*j-2.5)+
(step*k-2.5)*(step*k-2.5);
if (
r <
radius * radius
&&
r > radius * radius -0.5
&&
step*j-2.5 < 0.0
){
pos.push_back(center.x-2.5 + step * i);
pos.push_back(center.y-2.5 + step * j);
pos.push_back(center.z-2.5 + step * k);
vals.push_back(80.0);
states.push_back(0);
velos.push_back(dir.x*200);
velos.push_back(dir.y*200);
velos.push_back(dir.z*200);
numPar++;
}
}
addPars(&pos[0], &vals[0], &velos[0], numPar, &states[0]);
}
void Points::insertCUBE(const glm::vec3 ¢er, const glm::vec3 &dir)
{
//init a grid or pars
const uint X=20;
const uint Y=40;
const uint Z=40;
float step = 10.0/16.0;
GLfloat pos[X*Y*Z*3];
GLfloat vals[X*Y*Z];
GLuint states[X*Y*Z];
GLfloat velos[X*Y*Z*3];
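    // Note: these fixed-size stack arrays add up to roughly 1 MB for X*Y*Z = 32000 particles, which
    // can exceed the default stack size on MSVC; the heap-backed std::vector approach used in
    // insertBowl() avoids this.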
uint parIdx = 0;
for (uint i=0; i<X; i++)
for (uint j=0; j<Y; j++)
for (uint k=0; k<Z; k++)
{
pos[parIdx++] = center.x-7 + step * i;
pos[parIdx++] = center.y-14 + step * j;
pos[parIdx++] = center.z-14 + step * k;
}
for (uint i=0; i<X*Y*Z; i++)
{
vals[i] = 80.0;
states[i] = 0;
velos[3*i+0] = dir.x*100;
velos[3*i+1] = dir.y*100;
velos[3*i+2] = dir.z*100;
}
addPars(pos, vals,velos, X*Y*Z, states);
}
void Points::insertCUBEE()
{
//init a grid or pars
const uint X=64;
const uint Y=4;
const uint Z=64;
float step = 10.0/16.0;
GLfloat pos[X*Y*Z*3];
GLfloat vals[X*Y*Z];
GLuint states[X*Y*Z];
GLfloat velos[X*Y*Z*3];
uint parIdx = 0;
for (uint i=0; i<X; i++)
for (uint j=0; j<Y; j++)
for (uint k=0; k<Z; k++)
{
pos[parIdx++] = -20 + step * i;
pos[parIdx++] = -20 + step * k;
pos[parIdx++] = -10 + step * j;
}
for (uint i=0; i<X*Y*Z; i++)
{
vals[i] = 10.0;
states[i] = 0;
velos[3*i+0] = 0.0;
velos[3*i+1] = 0.0;
velos[3*i+2] = 0.0;
}
addPars(pos, vals,velos, X*Y*Z, states);
}
void Points::insertBar()
{
//init a grid or pars
const uint X=30;
const uint Y=4;
const uint Z=64;
float step = 10.0/32.0;
GLfloat pos[2*X*Y*Z*3];
GLfloat vals[2*X*Y*Z];
GLuint states[2*X*Y*Z];
GLfloat velos[2*X*Y*Z*3];
uint parIdx = 0;
for (uint i=0; i<X; i++)
for (uint j=0; j<Y; j++)
for (uint k=0; k<Z; k++)
{
pos[parIdx++] = -5.2+ step * i;
pos[parIdx++] = -10 + step * j;
pos[parIdx++] = -15 + step * k;
pos[parIdx++] = 5.2- step * i;
pos[parIdx++] = -10 + step * j;
pos[parIdx++] = -15 + step * k;
}
for (uint i=0; i<X*Y*Z*2; i++)
{
vals[i] = 30.0;
states[i] = 1;
velos[3*i+0] = 0.0;
velos[3*i+1] = 0.0;
velos[3*i+2] = 0.0;
}
addPars(pos, vals,velos, X*Y*Z, states);
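    // Note: the loops above write 2*X*Y*Z particles into these arrays, but only X*Y*Z is passed as
    // the count, so half of the generated bar particles are never uploaded; this may be unintentional.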
}
| b52063c97a707ed4c2618bd3352aa2113b5c6d70.cu | #include <objects/point/points.cuh>
#include <objects/point/kernels.cu>
#include <core/ImguiConfig.hpp>
void Points::init()
{
isPing = true;
//Generating buffers
glGenBuffers (BUFFER_COUNT, BUFFER);
std::cout<<glGetError()<<std::endl;
//init buffers
uint *tempUint = new uint[MAX_NUM_POINTS];
for (uint i = BUFFER_INDICES; i<= BUFFER_CELLID_PONG; i++)
{
glBindBuffer (GL_ARRAY_BUFFER, BUFFER[i]);
glBufferData (GL_ARRAY_BUFFER, MAX_NUM_POINTS * sizeof(uint), tempUint, GL_DYNAMIC_DRAW);
}
    delete[] tempUint;
float *tempFloat = new float[MAX_NUM_POINTS*3];
for (uint i=BUFFER_POSITIONS_PING; i<=BUFFER_NORMALS; i++)
{
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[i]);
glBufferData (GL_ARRAY_BUFFER, MAX_NUM_POINTS * sizeof(float) * 3, tempFloat, GL_DYNAMIC_DRAW);
}
    delete[] tempFloat;
for (uint i=BUFFER_DENSITIES_PING; i<=BUFFER_FREEZEPOINT; i++)
{
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[i]);
glBufferData (GL_ARRAY_BUFFER, MAX_NUM_POINTS * sizeof(float) , 0, GL_DYNAMIC_DRAW);
}
std::cout<<glGetError()<<std::endl;
//Setting Ping VAO
glGenVertexArrays (1, &pingVAO);
glBindVertexArray (pingVAO);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_POSITIONS_PING]);
glEnableVertexAttribArray (0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_TEMP_PING]);
glEnableVertexAttribArray (1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_COLORGRAD_PING]);
glEnableVertexAttribArray (2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_DENSITIES_PING]);
glEnableVertexAttribArray (3);
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, 0, NULL);
//Setting Pong VAO;
glGenVertexArrays (1, &pongVAO);
glBindVertexArray (pongVAO);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_POSITIONS_PONG]);
glEnableVertexAttribArray (0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_TEMP_PONG]);
glEnableVertexAttribArray (1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_COLORGRAD_PONG]);
glEnableVertexAttribArray (2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer( GL_ARRAY_BUFFER, BUFFER[BUFFER_DENSITIES_PONG]);
glEnableVertexAttribArray (3);
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, 0, NULL);
//load temperature texture
loadTempText();
//Register CUDA resources
for (int i=0; i<BUFFER_COUNT; i++)
gpuErrchk( cudaGraphicsGLRegisterBuffer(&(resources[i]), BUFFER[i], cudaGraphicsRegisterFlagsWriteDiscard));
//allocate space for constants
//Allocate and init grid
gpuErrchk(cudaMalloc((void**)&(grid.gridRes), sizeof(uint3)));
gpuErrchk(cudaMalloc((void**)&(grid.startPoint), sizeof(float3)));
gpuErrchk(cudaMalloc((void**)&(grid.cellSize), sizeof(float3)));
grid.gridRes = make_uint3(16, 16, 16);
grid.startPoint = make_float3(-15, -15, -15);
grid.cellSize = make_float3( -grid.startPoint.x*2/grid.gridRes.x, -grid.startPoint.y*2/grid.gridRes.y, -grid.startPoint.z*2/grid.gridRes.z);
gpuErrchk(cudaMalloc((void**)&(grid.count), sizeof(uint)*grid.gridRes.x*grid.gridRes.y*grid.gridRes.z));
gpuErrchk(cudaMalloc((void**)&(grid.firstIdx), sizeof(uint)*grid.gridRes.x*grid.gridRes.y*grid.gridRes.z));
gpuErrchk(cudaMalloc((void**)&dSumArray, sizeof(uint)
*(grid.gridRes.x * grid.gridRes.y * grid.gridRes.z -1)/2048+1)); //dead param
gpuErrchk(cudaMalloc((void**)&dDensSum, sizeof(float)));
gpuErrchk(cudaMalloc((void**)&dDensCount, sizeof(int)));
gpuErrchk(cudaMalloc((void**)&tempPos, sizeof(float)*3*MAX_NUM_POINTS));
}
void Points::setValues ( const GLfloat *val, const GLuint &num, const GLuint &size, const int &bufferIdx)
{
numPoints = num;
gpuErrchk(cudaMemcpy(dNumPoints, &numPoints, sizeof(uint), cudaMemcpyHostToDevice));
if ( numPoints > MAX_NUM_POINTS)
exit(-1);
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[bufferIdx]);
glBufferSubData(GL_ARRAY_BUFFER, 0, num*size*sizeof(GLfloat), val);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void Points::addPars( const GLfloat *pos, const GLfloat *temp, const GLfloat *velos, const GLuint &num, const GLuint *s)
{
if ( numPoints+num >= MAX_NUM_POINTS)
{
std::cout<<"To many particles\n";
exit(-1);
}
int BUFFER_POSITIONS = 0;
int BUFFER_TEMP = 0;
int BUFFER_STATES = 0;
int BUFFER_VELOCITIES = 0;
if (isPing)
{
BUFFER_POSITIONS = BUFFER_POSITIONS_PING;
BUFFER_TEMP = BUFFER_TEMP_PING;
BUFFER_STATES = BUFFER_STATES_PING;
BUFFER_VELOCITIES = BUFFER_VELOCITIES_PING;
}
else
{
BUFFER_POSITIONS = BUFFER_POSITIONS_PONG;
BUFFER_TEMP = BUFFER_TEMP_PONG;
BUFFER_STATES = BUFFER_STATES_PONG;
BUFFER_VELOCITIES = BUFFER_VELOCITIES_PONG;
}
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[BUFFER_POSITIONS]);
glBufferSubData(GL_ARRAY_BUFFER, 3*numPoints*sizeof(GLfloat), 3*num*sizeof(GLfloat), pos);
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[BUFFER_TEMP]);
glBufferSubData(GL_ARRAY_BUFFER, numPoints*sizeof(GLfloat), num*sizeof(GLfloat), temp);
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[BUFFER_STATES]);
glBufferSubData(GL_ARRAY_BUFFER, numPoints*sizeof(GLuint), num*sizeof(GLuint), s);
//set velo to zero
glBindBuffer(GL_ARRAY_BUFFER, BUFFER[BUFFER_VELOCITIES]);
glBufferSubData(GL_ARRAY_BUFFER, 3*numPoints*sizeof(GLfloat), 3*num*sizeof(GLfloat), velos);
glBindBuffer(GL_ARRAY_BUFFER, 0);
numPoints += num;
}
void Points::update()
{
runKernels();
float GPUsum;
int GPUcount;
gpuErrchk(cudaMemcpy(&GPUsum, dDensSum, sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&GPUcount, dDensCount, sizeof(int), cudaMemcpyDeviceToHost));
surfaceDens = GPUsum / GPUcount;
}
void Points::draw()
{
if (isPing)
glBindVertexArray(pingVAO);
else
glBindVertexArray(pongVAO);
glDrawArrays( GL_POINTS, 0, numPoints);
glBindVertexArray(0);
isPing = !isPing; //swap buffer
}
void Points::loadTempText() //load texture with il
{
ILuint imageID; // Create an image ID as a ULuint
ilInit();
GLuint textureID; // Create a texture ID as a GLuint
ILboolean success; // Create a flag to keep track of success/failure
ILenum error; // Create a flag to keep track of the IL error state
ilGenImages(1, &imageID); // Generate the image ID
ilBindImage(imageID); // Bind the image
success = ilLoadImage("./assets/temperature.png"); // Load the image file
// If we managed to load the image, then we can start to do things with it...
if (success)
{
// If the image is flipped (i.e. upside-down and mirrored, flip it the right way up!)
ILinfo ImageInfo;
iluGetImageInfo(&ImageInfo);
if (ImageInfo.Origin == IL_ORIGIN_UPPER_LEFT)
{
iluFlipImage();
}
// Convert the image into a suitable format to work with
// NOTE: If your image contains alpha channel you can replace IL_RGB with IL_RGBA
success = ilConvertImage(IL_RGB, IL_UNSIGNED_BYTE);
// Quit out if we failed the conversion
if (!success)
{
error = ilGetError();
std::cout << "Image conversion failed - IL reports error: " << error << " - " << iluErrorString(error) << std::endl;
exit(-1);
}
// Generate a new texture
glGenTextures(1, &textureID);
// Bind the texture to a name
glBindTexture(GL_TEXTURE_2D, textureID);
// Set texture clamping method
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
// Set texture interpolation method to use linear interpolation (no MIPMAPS)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Specify the texture specification
glTexImage2D(GL_TEXTURE_2D, // Type of texture
0, // Pyramid level (for mip-mapping) - 0 is the top level
ilGetInteger(IL_IMAGE_FORMAT), // Internal pixel format to use. Can be a generic type like GL_RGB or GL_RGBA, or a sized type
ilGetInteger(IL_IMAGE_WIDTH), // Image width
ilGetInteger(IL_IMAGE_HEIGHT), // Image height
0, // Border width in pixels (can either be 1 or 0)
ilGetInteger(IL_IMAGE_FORMAT), // Format of image pixel data
GL_UNSIGNED_BYTE, // Image data type
ilGetData()); // The actual image data itself
}
else // If we failed to open the image file in the first place...
{
error = ilGetError();
std::cout << "Image load failed - IL reports error: " << error << " - " << iluErrorString(error) << std::endl;
exit(-1);
}
ilDeleteImages(1, &imageID); // Because we have already copied image data into texture data we can release memory used by image.
std::cout << "Texture creation successful." << std::endl;
tex1 = textureID; // Return the GLuint to the texture so you can use it!
}
void Points::runKernelsPCISPH()
{
cudaMemset(dDensSum, 0, sizeof(float));
cudaMemset(dDensCount, 0, sizeof(int));
if (numPoints == 0)
return;
//get pointers
for (int i=0; i<BUFFER_COUNT; i++)
{
gpuErrchk(cudaGraphicsMapResources(1, &(resources[i])));
gpuErrchk(cudaGraphicsResourceGetMappedPointer( &(dPointers[i]), &(numBytes[i]), resources[i]));
}
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
if (!isPing)
{
//swap ping pong pointers
Points::swapPtr(dPointers[BUFFER_STATES_PING], dPointers[BUFFER_STATES_PONG]);
Points::swapPtr(dPointers[BUFFER_CELLID_PING], dPointers[BUFFER_CELLID_PONG]);
Points::swapPtr(dPointers[BUFFER_POSITIONS_PING], dPointers[BUFFER_POSITIONS_PONG]);
Points::swapPtr(dPointers[BUFFER_VELOCITIES_PING], dPointers[BUFFER_VELOCITIES_PONG]);
Points::swapPtr(dPointers[BUFFER_TEMP_PING], dPointers[BUFFER_TEMP_PONG]);
Points::swapPtr(dPointers[BUFFER_COLORGRAD_PING], dPointers[BUFFER_COLORGRAD_PONG]);
Points::swapPtr(dPointers[BUFFER_DENSITIES_PING], dPointers[BUFFER_DENSITIES_PONG]);
}
resetGrid<<<16*16, 16>>>
(grid.count,
grid.gridRes);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk(cudaMemcpy(dNumPoints, &numPoints, sizeof(uint), cudaMemcpyHostToDevice));
insertPars<<<max((numPoints-1)/1024+1, 1), 1024>>>
((float3*)dPointers[BUFFER_POSITIONS_PING],
(uint*)dPointers[BUFFER_CELLID_PING],
dNumPoints,
(uint*)dPointers[BUFFER_OFFSET],
grid.startPoint,
grid.cellSize,
grid.gridRes,
grid.count,
numPoints);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
prefixSumKernel<<<2,1024>>>(grid.count,grid.firstIdx,(uint*)dSumArray, grid.gridRes.x*grid.gridRes.y*grid.gridRes.z);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
prefexSumPostShort<<<2, 1024>>>(grid.firstIdx, (uint*)dSumArray, 2);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
countingSort<<<max((numPoints-1)/1024+1, 1), 1024>>>(
grid.firstIdx,
(uint*)dPointers[BUFFER_OFFSET],
(uint*)dPointers[BUFFER_CELLID_PING],
(uint*)dPointers[BUFFER_CELLID_PONG],
grid.count,
(float3*)dPointers[BUFFER_VELOCITIES_PING],
(float3*)dPointers[BUFFER_VELOCITIES_PONG],
(float3*)dPointers[BUFFER_POSITIONS_PING],
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(States*)dPointers[BUFFER_STATES_PING],
(States*)dPointers[BUFFER_STATES_PONG],
(float*)dPointers[BUFFER_TEMP_PING],
(float*)dPointers[BUFFER_TEMP_PONG],
numPoints);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk(cudaMemcpy(&numPoints, dNumPoints, sizeof(uint), cudaMemcpyDeviceToHost));
if (numPoints != 0)
{
calculateDensitiesGlobal<<<max((numPoints-1)/1024+1, 1), 1024>>>(
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(float*)dPointers[BUFFER_DENSITIES_PONG],
(uint*)dPointers[BUFFER_CELLID_PONG],
grid.firstIdx,
grid.count,
numPoints);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
//compute external forces
}
for (int i=0; i<BUFFER_COUNT; i++)
gpuErrchk(cudaGraphicsUnmapResources(1, &(resources[i])));
}
void Points::runKernels()
{
if (numPoints == 0)
return;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//get pointers
for (int i=0; i<BUFFER_COUNT; i++)
{
gpuErrchk(cudaGraphicsMapResources(1, &(resources[i])));
gpuErrchk(cudaGraphicsResourceGetMappedPointer( &(dPointers[i]), &(numBytes[i]), resources[i]));
}
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Num of Points\t %d", numPoints);
ImGui::Text("Mapping\t %f",elapsedTime);
/*ping:
ping -sort-> pong -> render
pong
pong -sort-> ping -> render
*/
if (!isPing)
{
//swap ping pong pointers
Points::swapPtr(dPointers[BUFFER_STATES_PING], dPointers[BUFFER_STATES_PONG]);
Points::swapPtr(dPointers[BUFFER_CELLID_PING], dPointers[BUFFER_CELLID_PONG]);
Points::swapPtr(dPointers[BUFFER_POSITIONS_PING], dPointers[BUFFER_POSITIONS_PONG]);
Points::swapPtr(dPointers[BUFFER_VELOCITIES_PING], dPointers[BUFFER_VELOCITIES_PONG]);
Points::swapPtr(dPointers[BUFFER_TEMP_PING], dPointers[BUFFER_TEMP_PONG]);
Points::swapPtr(dPointers[BUFFER_COLORGRAD_PING], dPointers[BUFFER_COLORGRAD_PONG]);
Points::swapPtr(dPointers[BUFFER_DENSITIES_PING], dPointers[BUFFER_DENSITIES_PONG]);
}
cudaEventRecord(start, 0);
resetGrid<<<16*16, 16>>>
(grid.count,
grid.gridRes);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Reset Grid\t %f",elapsedTime);
//insert particles and count
//std::cout<<"Inserting particles...\n";
gpuErrchk(cudaMemcpy(dNumPoints, &numPoints, sizeof(uint), cudaMemcpyHostToDevice));
cudaEventRecord(start, 0);
insertPars<<<max((numPoints-1)/1024+1, 1), 1024>>>
((float3*)dPointers[BUFFER_POSITIONS_PING],
(uint*)dPointers[BUFFER_CELLID_PING],
dNumPoints,
(uint*)dPointers[BUFFER_OFFSET],
grid.startPoint,
grid.cellSize,
grid.gridRes,
grid.count,
numPoints);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Insert Pars\t %f",elapsedTime);
//do prefix sum on particles
cudaEventRecord(start, 0);
prefixSumKernel<<<2,1024>>>(grid.count,grid.firstIdx,(uint*)dSumArray, grid.gridRes.x*grid.gridRes.y*grid.gridRes.z);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
prefexSumPostShort<<<2, 1024>>>(grid.firstIdx, (uint*)dSumArray, 2);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
//std::cout<<"counting sort...\n";
countingSort<<<max((numPoints-1)/1024+1, 1), 1024>>>(
grid.firstIdx,
(uint*)dPointers[BUFFER_OFFSET],
(uint*)dPointers[BUFFER_CELLID_PING],
(uint*)dPointers[BUFFER_CELLID_PONG],
grid.count,
(float3*)dPointers[BUFFER_VELOCITIES_PING],
(float3*)dPointers[BUFFER_VELOCITIES_PONG],
(float3*)dPointers[BUFFER_POSITIONS_PING],
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(States*)dPointers[BUFFER_STATES_PING],
(States*)dPointers[BUFFER_STATES_PONG],
(float*)dPointers[BUFFER_TEMP_PING],
(float*)dPointers[BUFFER_TEMP_PONG],
numPoints);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Counting sort\t %f",elapsedTime);
//std::cout<<"Copying data back...\n";
gpuErrchk(cudaMemcpy(&numPoints, dNumPoints, sizeof(uint), cudaMemcpyDeviceToHost));
if (numPoints != 0)
{
//===============++Densities++=====================
//std::cout<<"Calculating densities... numOfPar: "<<max((numPoints-1)/1024+1, 1)<<std::endl;;
cudaEventRecord(start, 0);
calculateDensitiesGlobal<<<max((numPoints-1)/1024+1, 1), 1024>>>(
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(float*)dPointers[BUFFER_DENSITIES_PONG],
(uint*)dPointers[BUFFER_CELLID_PONG],
grid.firstIdx,
grid.count,
numPoints);
//calculateDensitiesShared<<<16*16*16, 100>>>(
// (float3*)dPointers[BUFFER_POSITIONS_PONG],
// (float*)dPointers[BUFFER_DENSITIES],
// (uint*)dPointers[BUFFER_CELLID_PONG],
// grid.firstIdx,
// grid.count,
// numPoints);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Densities\t %f",elapsedTime);
//Color grad
//cudaEventRecord(start, 0);
// computeColorGradsGlobal<<<max((numPoints-1)/1024+1, 1), 1024>>>(
// (float3*)dPointers[BUFFER_POSITIONS_PONG],
// (float*)dPointers[BUFFER_DENSITIES_PONG],
// (float3*)dPointers[BUFFER_COLORGRAD_PONG],
// (float*)dPointers[BUFFER_TEMP_PONG],
// (float*)dDensSum,
// (uint*)dDensCount,
// (uint*)dPointers[BUFFER_STATES_PONG],
// (uint*)dPointers[BUFFER_CELLID_PONG],
// grid.firstIdx,
// grid.count,
// numPoints
// );
// gpuErrchk( cudaPeekAtLastError() );
// gpuErrchk( cudaDeviceSynchronize() );
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&elapsedTime, start, stop);
//ImGui::Text("Color Grad\t %f",elapsedTime);
//=============++Forces++=======================
//std::cout<<"Computing forces...\n";
#define FORCE_BLOCK_DIM 512
cudaEventRecord(start, 0);
calculateForcesGlobal<<<max((numPoints-1)/FORCE_BLOCK_DIM+1,1), FORCE_BLOCK_DIM>>>(
(float3*)dPointers[BUFFER_POSITIONS_PONG],
(float*)dPointers[BUFFER_DENSITIES_PONG],
(float3*)dPointers[BUFFER_VELOCITIES_PONG],
(float3*)dPointers[BUFFER_PRESSURES],
(float3*)dPointers[BUFFER_COLORGRAD_PONG],
(uint*)dPointers[BUFFER_CELLID_PONG],
(uint*)dPointers[BUFFER_STATES_PONG],
(uint*)grid.firstIdx,
(uint*)grid.count,
(float3*)dPointers[BUFFER_NORMALS],
numPoints);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
ImGui::Text("Forces\t %f",elapsedTime);
}
//Unmap pointers;
for (int i=0; i<BUFFER_COUNT; i++)
gpuErrchk(cudaGraphicsUnmapResources(1, &(resources[i])));
}
void Points::insertParsFromOBJ(std::string fileName, float temperature, GLuint state)
{
std::cout<<"Inserting OBJ "<<fileName<<std::endl;
std::vector<GLfloat> vertices;
std::vector<GLfloat> vals;
std::vector<GLuint> states;
std::vector<GLfloat> velos;
std::string str;
std::ifstream inFile(fileName);
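//parse the OBJ file: collect vertex positions from lines starting with "v"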
while(std::getline(inFile, str))
{
splitstring split(str);
vector<std::string> splited = split.split(' ');
if (splited.size() > 0)
if (splited[0] == "v")
for (int i=1; i<splited.size(); i++)
vertices.push_back(std::stof(splited[i]));
}
for (size_t i=0; i<vertices.size()/3; i++)
{
vals.push_back(temperature);
states.push_back(state);
velos.push_back(0.0);
velos.push_back(0.0);
velos.push_back(0.0);
}
std::cout<<states.size()<<std::endl;
std::cout<<"Num vertices: "<<vertices.size()<<std::endl;
addPars(&vertices[0], &vals[0], &velos[0], states.size(), &states[0]);
}
void Points::insertBowl(const glm::vec3 ¢er, const glm::vec3 &dir)
{
const float radius = 1.0;
const uint X=16;
const uint Y=16;
const uint Z=16;
const float step = 10.0/32.0;
std::vector<GLfloat> pos;
std::vector<GLfloat> vals;
std::vector<GLuint> states;
std::vector<GLfloat> velos;
pos.reserve(X*Y*Z*3);
vals.reserve(X*Y*Z);
states.reserve(X*Y*Z);
velos.reserve(X*Y*Z*3);
GLuint numPar = 0;
for (uint i=0; i<X; i++)
for (uint j=0; j<Y; j++)
for (uint k=0; k<Z; k++)
{
float r = (step*i-2.5)*(step*i-2.5)+
(step*j-2.5)*(step*j-2.5)+
(step*k-2.5)*(step*k-2.5);
if (
r <
radius * radius
&&
r > radius * radius -0.5
&&
step*j-2.5 < 0.0
){
pos.push_back(center.x-2.5 + step * i);
pos.push_back(center.y-2.5 + step * j);
pos.push_back(center.z-2.5 + step * k);
vals.push_back(80.0);
states.push_back(0);
velos.push_back(dir.x*200);
velos.push_back(dir.y*200);
velos.push_back(dir.z*200);
numPar++;
}
}
addPars(&pos[0], &vals[0], &velos[0], numPar, &states[0]);
}
void Points::insertCUBE(const glm::vec3 ¢er, const glm::vec3 &dir)
{
//init a grid or pars
const uint X=20;
const uint Y=40;
const uint Z=40;
float step = 10.0/16.0;
GLfloat pos[X*Y*Z*3];
GLfloat vals[X*Y*Z];
GLuint states[X*Y*Z];
GLfloat velos[X*Y*Z*3];
uint parIdx = 0;
for (uint i=0; i<X; i++)
for (uint j=0; j<Y; j++)
for (uint k=0; k<Z; k++)
{
pos[parIdx++] = center.x-7 + step * i;
pos[parIdx++] = center.y-14 + step * j;
pos[parIdx++] = center.z-14 + step * k;
}
for (uint i=0; i<X*Y*Z; i++)
{
vals[i] = 80.0;
states[i] = 0;
velos[3*i+0] = dir.x*100;
velos[3*i+1] = dir.y*100;
velos[3*i+2] = dir.z*100;
}
addPars(pos, vals,velos, X*Y*Z, states);
}
void Points::insertCUBEE()
{
//init a grid or pars
const uint X=64;
const uint Y=4;
const uint Z=64;
float step = 10.0/16.0;
GLfloat pos[X*Y*Z*3];
GLfloat vals[X*Y*Z];
GLuint states[X*Y*Z];
GLfloat velos[X*Y*Z*3];
uint parIdx = 0;
for (uint i=0; i<X; i++)
for (uint j=0; j<Y; j++)
for (uint k=0; k<Z; k++)
{
pos[parIdx++] = -20 + step * i;
pos[parIdx++] = -20 + step * k;
pos[parIdx++] = -10 + step * j;
}
for (uint i=0; i<X*Y*Z; i++)
{
vals[i] = 10.0;
states[i] = 0;
velos[3*i+0] = 0.0;
velos[3*i+1] = 0.0;
velos[3*i+2] = 0.0;
}
addPars(pos, vals,velos, X*Y*Z, states);
}
void Points::insertBar()
{
//init a grid or pars
const uint X=30;
const uint Y=4;
const uint Z=64;
float step = 10.0/32.0;
GLfloat pos[2*X*Y*Z*3];
GLfloat vals[2*X*Y*Z];
GLuint states[2*X*Y*Z];
GLfloat velos[2*X*Y*Z*3];
uint parIdx = 0;
for (uint i=0; i<X; i++)
for (uint j=0; j<Y; j++)
for (uint k=0; k<Z; k++)
{
pos[parIdx++] = -5.2+ step * i;
pos[parIdx++] = -10 + step * j;
pos[parIdx++] = -15 + step * k;
pos[parIdx++] = 5.2- step * i;
pos[parIdx++] = -10 + step * j;
pos[parIdx++] = -15 + step * k;
}
for (uint i=0; i<X*Y*Z*2; i++)
{
vals[i] = 30.0;
states[i] = 1;
velos[3*i+0] = 0.0;
velos[3*i+1] = 0.0;
velos[3*i+2] = 0.0;
}
addPars(pos, vals, velos, 2*X*Y*Z, states); //two bars were filled above (2*X*Y*Z particles)
}
|
90199fb78ce26702ec7b71a1bdc95602e99d2baa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_generate_chunk_kernel;
int xdim0_generate_chunk_kernel_h = -1;
__constant__ int xdim1_generate_chunk_kernel;
int xdim1_generate_chunk_kernel_h = -1;
__constant__ int xdim2_generate_chunk_kernel;
int xdim2_generate_chunk_kernel_h = -1;
__constant__ int xdim3_generate_chunk_kernel;
int xdim3_generate_chunk_kernel_h = -1;
__constant__ int xdim4_generate_chunk_kernel;
int xdim4_generate_chunk_kernel_h = -1;
__constant__ int xdim5_generate_chunk_kernel;
int xdim5_generate_chunk_kernel_h = -1;
__constant__ int xdim6_generate_chunk_kernel;
int xdim6_generate_chunk_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x,y) (x+xdim0_generate_chunk_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_generate_chunk_kernel*(y))
#define OPS_ACC2(x,y) (x+xdim2_generate_chunk_kernel*(y))
#define OPS_ACC3(x,y) (x+xdim3_generate_chunk_kernel*(y))
#define OPS_ACC4(x,y) (x+xdim4_generate_chunk_kernel*(y))
#define OPS_ACC5(x,y) (x+xdim5_generate_chunk_kernel*(y))
#define OPS_ACC6(x,y) (x+xdim6_generate_chunk_kernel*(y))
//user function
__device__
void generate_chunk_kernel_gpu( const double *vertexx, const double *vertexy,
double *energy0, double *density0,
double *u0,
const double *cellx, const double *celly) {
double radius, x_cent, y_cent;
int is_in = 0;
int is_in2 = 0;
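//start from the background state (states[0]); later states overwrite cells inside their geometry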
energy0[OPS_ACC2(0,0)]= states[0].energy;
density0[OPS_ACC3(0,0)]= states[0].density;
for(int i = 1; i<number_of_states; i++) {
x_cent=states[i].xmin;
y_cent=states[i].ymin;
is_in = 0;
is_in2 = 0;
if (states[i].geometry == g_rect) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
if(vertexx[OPS_ACC0(1+i1,0)] >= states[i].xmin && vertexx[OPS_ACC0(0+i1,0)] < states[i].xmax) {
if(vertexy[OPS_ACC1(0,1+j1)] >= states[i].ymin && vertexy[OPS_ACC1(0,0+j1)] < states[i].ymax) {
is_in = 1;
}
}
}
}
if(vertexx[OPS_ACC0(1,0)] >= states[i].xmin && vertexx[OPS_ACC0(0,0)] < states[i].xmax) {
if(vertexy[OPS_ACC1(0,1)] >= states[i].ymin && vertexy[OPS_ACC1(0,0)] < states[i].ymax) {
is_in2 = 1;
}
}
if (is_in2) {
energy0[OPS_ACC2(0,0)] = states[i].energy;
density0[OPS_ACC3(0,0)] = states[i].density;
}
}
else if(states[i].geometry == g_circ) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
radius = sqrt ((cellx[OPS_ACC5(i1,0)] - x_cent) * (cellx[OPS_ACC5(i1,0)] - x_cent) +
(celly[OPS_ACC6(0,j1)] - y_cent) * (celly[OPS_ACC6(0,j1)] - y_cent));
if (radius <= states[i].radius) {
is_in = 1;
}
}
}
if (radius <= states[i].radius) is_in2 = 1;
if (is_in2) {
energy0[OPS_ACC2(0,0)] = states[i].energy;
density0[OPS_ACC3(0,0)] = states[i].density;
}
}
else if(states[i].geometry == g_point) {
if(vertexx[OPS_ACC0(0,0)] == x_cent && vertexy[OPS_ACC1(0,0)] == y_cent) {
energy0[OPS_ACC2(0,0)] = states[i].energy;
density0[OPS_ACC3(0,0)] = states[i].density;
}
}
}
u0[OPS_ACC4(0,0)] = energy0[OPS_ACC2(0,0)] * density0[OPS_ACC3(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_generate_chunk_kernel(
const double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
const double* __restrict arg5,
const double* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 0*1 * xdim0_generate_chunk_kernel;
arg1 += idx_x * 0*1 + idx_y * 1*1 * xdim1_generate_chunk_kernel;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_generate_chunk_kernel;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_generate_chunk_kernel;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_generate_chunk_kernel;
arg5 += idx_x * 1*1 + idx_y * 0*1 * xdim5_generate_chunk_kernel;
arg6 += idx_x * 0*1 + idx_y * 1*1 * xdim6_generate_chunk_kernel;
if (idx_x < size0 && idx_y < size1) {
generate_chunk_kernel_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_generate_chunk_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"generate_chunk_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
int xdim6 = args[6].dat->size[0];
if (xdim0 != xdim0_generate_chunk_kernel_h || xdim1 != xdim1_generate_chunk_kernel_h || xdim2 != xdim2_generate_chunk_kernel_h || xdim3 != xdim3_generate_chunk_kernel_h || xdim4 != xdim4_generate_chunk_kernel_h || xdim5 != xdim5_generate_chunk_kernel_h || xdim6 != xdim6_generate_chunk_kernel_h) {
hipMemcpyToSymbol( xdim0_generate_chunk_kernel, &xdim0, sizeof(int) );
xdim0_generate_chunk_kernel_h = xdim0;
hipMemcpyToSymbol( xdim1_generate_chunk_kernel, &xdim1, sizeof(int) );
xdim1_generate_chunk_kernel_h = xdim1;
hipMemcpyToSymbol( xdim2_generate_chunk_kernel, &xdim2, sizeof(int) );
xdim2_generate_chunk_kernel_h = xdim2;
hipMemcpyToSymbol( xdim3_generate_chunk_kernel, &xdim3, sizeof(int) );
xdim3_generate_chunk_kernel_h = xdim3;
hipMemcpyToSymbol( xdim4_generate_chunk_kernel, &xdim4, sizeof(int) );
xdim4_generate_chunk_kernel_h = xdim4;
hipMemcpyToSymbol( xdim5_generate_chunk_kernel, &xdim5, sizeof(int) );
xdim5_generate_chunk_kernel_h = xdim5;
hipMemcpyToSymbol( xdim6_generate_chunk_kernel, &xdim6, sizeof(int) );
xdim6_generate_chunk_kernel_h = xdim6;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_generate_chunk_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6],x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->function = ops_par_loop_generate_chunk_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"generate_chunk_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 90199fb78ce26702ec7b71a1bdc95602e99d2baa.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_generate_chunk_kernel;
int xdim0_generate_chunk_kernel_h = -1;
__constant__ int xdim1_generate_chunk_kernel;
int xdim1_generate_chunk_kernel_h = -1;
__constant__ int xdim2_generate_chunk_kernel;
int xdim2_generate_chunk_kernel_h = -1;
__constant__ int xdim3_generate_chunk_kernel;
int xdim3_generate_chunk_kernel_h = -1;
__constant__ int xdim4_generate_chunk_kernel;
int xdim4_generate_chunk_kernel_h = -1;
__constant__ int xdim5_generate_chunk_kernel;
int xdim5_generate_chunk_kernel_h = -1;
__constant__ int xdim6_generate_chunk_kernel;
int xdim6_generate_chunk_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x,y) (x+xdim0_generate_chunk_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_generate_chunk_kernel*(y))
#define OPS_ACC2(x,y) (x+xdim2_generate_chunk_kernel*(y))
#define OPS_ACC3(x,y) (x+xdim3_generate_chunk_kernel*(y))
#define OPS_ACC4(x,y) (x+xdim4_generate_chunk_kernel*(y))
#define OPS_ACC5(x,y) (x+xdim5_generate_chunk_kernel*(y))
#define OPS_ACC6(x,y) (x+xdim6_generate_chunk_kernel*(y))
//user function
__device__
void generate_chunk_kernel_gpu( const double *vertexx, const double *vertexy,
double *energy0, double *density0,
double *u0,
const double *cellx, const double *celly) {
double radius, x_cent, y_cent;
int is_in = 0;
int is_in2 = 0;
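//start from the background state (states[0]); later states overwrite cells inside their geometry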
energy0[OPS_ACC2(0,0)]= states[0].energy;
density0[OPS_ACC3(0,0)]= states[0].density;
for(int i = 1; i<number_of_states; i++) {
x_cent=states[i].xmin;
y_cent=states[i].ymin;
is_in = 0;
is_in2 = 0;
if (states[i].geometry == g_rect) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
if(vertexx[OPS_ACC0(1+i1,0)] >= states[i].xmin && vertexx[OPS_ACC0(0+i1,0)] < states[i].xmax) {
if(vertexy[OPS_ACC1(0,1+j1)] >= states[i].ymin && vertexy[OPS_ACC1(0,0+j1)] < states[i].ymax) {
is_in = 1;
}
}
}
}
if(vertexx[OPS_ACC0(1,0)] >= states[i].xmin && vertexx[OPS_ACC0(0,0)] < states[i].xmax) {
if(vertexy[OPS_ACC1(0,1)] >= states[i].ymin && vertexy[OPS_ACC1(0,0)] < states[i].ymax) {
is_in2 = 1;
}
}
if (is_in2) {
energy0[OPS_ACC2(0,0)] = states[i].energy;
density0[OPS_ACC3(0,0)] = states[i].density;
}
}
else if(states[i].geometry == g_circ) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
radius = sqrt ((cellx[OPS_ACC5(i1,0)] - x_cent) * (cellx[OPS_ACC5(i1,0)] - x_cent) +
(celly[OPS_ACC6(0,j1)] - y_cent) * (celly[OPS_ACC6(0,j1)] - y_cent));
if (radius <= states[i].radius) {
is_in = 1;
}
}
}
if (radius <= states[i].radius) is_in2 = 1;
if (is_in2) {
energy0[OPS_ACC2(0,0)] = states[i].energy;
density0[OPS_ACC3(0,0)] = states[i].density;
}
}
else if(states[i].geometry == g_point) {
if(vertexx[OPS_ACC0(0,0)] == x_cent && vertexy[OPS_ACC1(0,0)] == y_cent) {
energy0[OPS_ACC2(0,0)] = states[i].energy;
density0[OPS_ACC3(0,0)] = states[i].density;
}
}
}
u0[OPS_ACC4(0,0)] = energy0[OPS_ACC2(0,0)] * density0[OPS_ACC3(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_generate_chunk_kernel(
const double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
const double* __restrict arg5,
const double* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 0*1 * xdim0_generate_chunk_kernel;
arg1 += idx_x * 0*1 + idx_y * 1*1 * xdim1_generate_chunk_kernel;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_generate_chunk_kernel;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_generate_chunk_kernel;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_generate_chunk_kernel;
arg5 += idx_x * 1*1 + idx_y * 0*1 * xdim5_generate_chunk_kernel;
arg6 += idx_x * 0*1 + idx_y * 1*1 * xdim6_generate_chunk_kernel;
if (idx_x < size0 && idx_y < size1) {
generate_chunk_kernel_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_generate_chunk_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"generate_chunk_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
int xdim6 = args[6].dat->size[0];
if (xdim0 != xdim0_generate_chunk_kernel_h || xdim1 != xdim1_generate_chunk_kernel_h || xdim2 != xdim2_generate_chunk_kernel_h || xdim3 != xdim3_generate_chunk_kernel_h || xdim4 != xdim4_generate_chunk_kernel_h || xdim5 != xdim5_generate_chunk_kernel_h || xdim6 != xdim6_generate_chunk_kernel_h) {
cudaMemcpyToSymbol( xdim0_generate_chunk_kernel, &xdim0, sizeof(int) );
xdim0_generate_chunk_kernel_h = xdim0;
cudaMemcpyToSymbol( xdim1_generate_chunk_kernel, &xdim1, sizeof(int) );
xdim1_generate_chunk_kernel_h = xdim1;
cudaMemcpyToSymbol( xdim2_generate_chunk_kernel, &xdim2, sizeof(int) );
xdim2_generate_chunk_kernel_h = xdim2;
cudaMemcpyToSymbol( xdim3_generate_chunk_kernel, &xdim3, sizeof(int) );
xdim3_generate_chunk_kernel_h = xdim3;
cudaMemcpyToSymbol( xdim4_generate_chunk_kernel, &xdim4, sizeof(int) );
xdim4_generate_chunk_kernel_h = xdim4;
cudaMemcpyToSymbol( xdim5_generate_chunk_kernel, &xdim5, sizeof(int) );
xdim5_generate_chunk_kernel_h = xdim5;
cudaMemcpyToSymbol( xdim6_generate_chunk_kernel, &xdim6, sizeof(int) );
xdim6_generate_chunk_kernel_h = xdim6;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_generate_chunk_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6],x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->function = ops_par_loop_generate_chunk_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"generate_chunk_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
7f77f2ef2092f0f40a21b8187d4e6598befb1037.hip | // !!! This is a file automatically generated by hipify!!!
#include "stereo.hpp"
#include "stereosgm.hpp"
#include "../costs/stable.hpp"
#include "../costs/dual.hpp"
#include <opencv2/cudawarping.hpp>
#include <opencv2/cudafilters.hpp>
#include <opencv2/highgui.hpp>
typedef MultiCostsWeighted<StableMatchingCost,3> MatchingCost;
static void variance_mask(cv::InputArray in, cv::OutputArray out, int wsize=3) {
if (in.isGpuMat() && out.isGpuMat()) {
cv::cuda::GpuMat im;
cv::cuda::GpuMat im2;
cv::cuda::GpuMat mean;
cv::cuda::GpuMat mean2;
mean.create(in.size(), CV_32FC1);
mean2.create(in.size(), CV_32FC1);
im2.create(in.size(), CV_32FC1);
if (in.type() != CV_32FC1) {
in.getGpuMat().convertTo(im, CV_32FC1);
}
else {
im = in.getGpuMat();
}
cv::cuda::multiply(im, im, im2);
auto filter = cv::cuda::createBoxFilter(CV_32FC1, CV_32FC1, cv::Size(wsize,wsize));
filter->apply(im, mean); // E[X]
filter->apply(im2, mean2); // E[X^2]
cv::cuda::multiply(mean, mean, mean); // (E[X])^2
// NOTE: floating point accuracy in subtraction
// (cv::cuda::createBoxFilter only supports 8 bit integer types)
cv::cuda::subtract(mean2, mean, out.getGpuMatRef()); // E[X^2] - (E[X])^2
}
else { throw std::exception(); /* todo CPU version */ }
}
struct StereoHStableSgm::Impl : public StereoSgm<MatchingCost, StereoHStableSgm::Parameters> {
StableMatchingCost cost_fine;
StableMatchingCost cost_medium;
StableMatchingCost cost_coarse;
Array2D<uchar> l;
Array2D<uchar> r;
Array2D<float> var_fine;
Array2D<float> var_medium;
Array2D<float> var_coarse;
Impl(StereoHStableSgm::Parameters ¶ms, int width, int height, int dmin, int dmax) :
StereoSgm(params, width, height, dmin, dmax),
cost_fine(width, height, dmin, dmax),
cost_medium(width, height, dmin, dmax),
cost_coarse(width, height, dmin, dmax),
l(width, height), r(width, height),
var_fine(width, height),
var_medium(width, height),
var_coarse(width, height) {
cost.add(0, cost_fine, var_fine);
cost.add(1, cost_medium, var_medium);
cost.add(2, cost_coarse, var_coarse);
}
};
StereoHStableSgm::StereoHStableSgm() : impl_(nullptr) {
impl_ = new Impl(params, 0, 0, 0, 0);
}
void StereoHStableSgm::compute(cv::InputArray l, cv::InputArray r, cv::OutputArray disparity) {
//hipSetDevice(0);
if (l.rows() != impl_->cost.height() || r.cols() != impl_->cost.width()) {
delete impl_; impl_ = nullptr;
impl_ = new Impl(params, l.cols(), l.rows(), params.d_min, params.d_max);
}
mat2gray(l, impl_->l);
mat2gray(r, impl_->r);
timer_set();
static constexpr int DOWNSCALE_MEDIUM = 4;
static constexpr int DOWNSCALE_COARSE = 6;
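// build downsampled (medium/coarse) image pairs; their matching costs are blended using the variance-based weights below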
Array2D<uchar> medium_l(l.cols()/DOWNSCALE_MEDIUM, l.rows()/DOWNSCALE_MEDIUM);
Array2D<uchar> medium_r(r.cols()/DOWNSCALE_MEDIUM, r.rows()/DOWNSCALE_MEDIUM);
Array2D<uchar> coarse_l(l.cols()/DOWNSCALE_COARSE, l.rows()/DOWNSCALE_COARSE);
Array2D<uchar> coarse_r(r.cols()/DOWNSCALE_COARSE, r.rows()/DOWNSCALE_COARSE);
cv::cuda::resize(impl_->l.toGpuMat(), medium_l.toGpuMat(), cv::Size(medium_l.width, medium_l.height));
cv::cuda::resize(impl_->r.toGpuMat(), medium_r.toGpuMat(), cv::Size(medium_r.width, medium_r.height));
cv::cuda::resize(impl_->l.toGpuMat(), coarse_l.toGpuMat(), cv::Size(coarse_l.width, coarse_l.height));
cv::cuda::resize(impl_->r.toGpuMat(), coarse_r.toGpuMat(), cv::Size(coarse_r.width, coarse_r.height));
cv::cuda::GpuMat var_fine = impl_->var_fine.toGpuMat();
variance_mask(impl_->l.toGpuMat(), var_fine, params.var_window);
cv::cuda::normalize(var_fine, var_fine, params.alpha, params.beta, cv::NORM_MINMAX, -1);
cv::cuda::GpuMat var_medium; // = impl_->var_medium.toGpuMat();
variance_mask(medium_l.toGpuMat(), var_medium, params.var_window);
cv::cuda::normalize(var_medium, var_medium, params.alpha, params.beta, cv::NORM_MINMAX, -1);
cv::cuda::resize(var_medium, impl_->var_medium.toGpuMat(), cv::Size(l.cols(), l.rows()));
cv::cuda::GpuMat var_coarse; // = impl_->var_coarse.toGpuMat();
variance_mask(coarse_l.toGpuMat(), var_coarse, params.var_window);
cv::cuda::normalize(var_coarse, var_coarse, params.alpha, params.beta, cv::NORM_MINMAX, -1);
cv::cuda::resize(var_coarse, impl_->var_coarse.toGpuMat(), cv::Size(l.cols(), l.rows()));
cv::Mat tmp;
impl_->var_coarse.toGpuMat().download(tmp);
cv::imshow("Var", tmp);
impl_->cost_fine.generateFilterMask(params.wsize, 16);
impl_->cost_medium.setFilter(impl_->cost_fine.getFilter());
impl_->cost_coarse.setFilter(impl_->cost_fine.getFilter());
impl_->cost_fine.set(impl_->l, impl_->r);
impl_->cost_medium.set(medium_l, medium_r, l.cols(), l.rows());
impl_->cost_coarse.set(coarse_l, coarse_r, l.cols(), l.rows());
impl_->cost.set();
impl_->compute(disparity);
}
StereoHStableSgm::~StereoHStableSgm() {
if (impl_) {
delete impl_;
impl_ = nullptr;
}
}
| 7f77f2ef2092f0f40a21b8187d4e6598befb1037.cu | #include "stereo.hpp"
#include "stereosgm.hpp"
#include "../costs/stable.hpp"
#include "../costs/dual.hpp"
#include <opencv2/cudawarping.hpp>
#include <opencv2/cudafilters.hpp>
#include <opencv2/highgui.hpp>
typedef MultiCostsWeighted<StableMatchingCost,3> MatchingCost;
static void variance_mask(cv::InputArray in, cv::OutputArray out, int wsize=3) {
if (in.isGpuMat() && out.isGpuMat()) {
cv::cuda::GpuMat im;
cv::cuda::GpuMat im2;
cv::cuda::GpuMat mean;
cv::cuda::GpuMat mean2;
mean.create(in.size(), CV_32FC1);
mean2.create(in.size(), CV_32FC1);
im2.create(in.size(), CV_32FC1);
if (in.type() != CV_32FC1) {
in.getGpuMat().convertTo(im, CV_32FC1);
}
else {
im = in.getGpuMat();
}
cv::cuda::multiply(im, im, im2);
auto filter = cv::cuda::createBoxFilter(CV_32FC1, CV_32FC1, cv::Size(wsize,wsize));
filter->apply(im, mean); // E[X]
filter->apply(im2, mean2); // E[X^2]
cv::cuda::multiply(mean, mean, mean); // (E[X])^2
// NOTE: floating point accuracy in subtraction
// (cv::cuda::createBoxFilter only supports 8 bit integer types)
cv::cuda::subtract(mean2, mean, out.getGpuMatRef()); // E[X^2] - (E[X])^2
}
else { throw std::exception(); /* todo CPU version */ }
}
struct StereoHStableSgm::Impl : public StereoSgm<MatchingCost, StereoHStableSgm::Parameters> {
StableMatchingCost cost_fine;
StableMatchingCost cost_medium;
StableMatchingCost cost_coarse;
Array2D<uchar> l;
Array2D<uchar> r;
Array2D<float> var_fine;
Array2D<float> var_medium;
Array2D<float> var_coarse;
Impl(StereoHStableSgm::Parameters ¶ms, int width, int height, int dmin, int dmax) :
StereoSgm(params, width, height, dmin, dmax),
cost_fine(width, height, dmin, dmax),
cost_medium(width, height, dmin, dmax),
cost_coarse(width, height, dmin, dmax),
l(width, height), r(width, height),
var_fine(width, height),
var_medium(width, height),
var_coarse(width, height) {
cost.add(0, cost_fine, var_fine);
cost.add(1, cost_medium, var_medium);
cost.add(2, cost_coarse, var_coarse);
}
};
StereoHStableSgm::StereoHStableSgm() : impl_(nullptr) {
impl_ = new Impl(params, 0, 0, 0, 0);
}
void StereoHStableSgm::compute(cv::InputArray l, cv::InputArray r, cv::OutputArray disparity) {
//cudaSetDevice(0);
if (l.rows() != impl_->cost.height() || r.cols() != impl_->cost.width()) {
delete impl_; impl_ = nullptr;
impl_ = new Impl(params, l.cols(), l.rows(), params.d_min, params.d_max);
}
mat2gray(l, impl_->l);
mat2gray(r, impl_->r);
timer_set();
static constexpr int DOWNSCALE_MEDIUM = 4;
static constexpr int DOWNSCALE_COARSE = 6;
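// build downsampled (medium/coarse) image pairs; their matching costs are blended using the variance-based weights below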
Array2D<uchar> medium_l(l.cols()/DOWNSCALE_MEDIUM, l.rows()/DOWNSCALE_MEDIUM);
Array2D<uchar> medium_r(r.cols()/DOWNSCALE_MEDIUM, r.rows()/DOWNSCALE_MEDIUM);
Array2D<uchar> coarse_l(l.cols()/DOWNSCALE_COARSE, l.rows()/DOWNSCALE_COARSE);
Array2D<uchar> coarse_r(r.cols()/DOWNSCALE_COARSE, r.rows()/DOWNSCALE_COARSE);
cv::cuda::resize(impl_->l.toGpuMat(), medium_l.toGpuMat(), cv::Size(medium_l.width, medium_l.height));
cv::cuda::resize(impl_->r.toGpuMat(), medium_r.toGpuMat(), cv::Size(medium_r.width, medium_r.height));
cv::cuda::resize(impl_->l.toGpuMat(), coarse_l.toGpuMat(), cv::Size(coarse_l.width, coarse_l.height));
cv::cuda::resize(impl_->r.toGpuMat(), coarse_r.toGpuMat(), cv::Size(coarse_r.width, coarse_r.height));
cv::cuda::GpuMat var_fine = impl_->var_fine.toGpuMat();
variance_mask(impl_->l.toGpuMat(), var_fine, params.var_window);
cv::cuda::normalize(var_fine, var_fine, params.alpha, params.beta, cv::NORM_MINMAX, -1);
cv::cuda::GpuMat var_medium; // = impl_->var_medium.toGpuMat();
variance_mask(medium_l.toGpuMat(), var_medium, params.var_window);
cv::cuda::normalize(var_medium, var_medium, params.alpha, params.beta, cv::NORM_MINMAX, -1);
cv::cuda::resize(var_medium, impl_->var_medium.toGpuMat(), cv::Size(l.cols(), l.rows()));
cv::cuda::GpuMat var_coarse; // = impl_->var_coarse.toGpuMat();
variance_mask(coarse_l.toGpuMat(), var_coarse, params.var_window);
cv::cuda::normalize(var_coarse, var_coarse, params.alpha, params.beta, cv::NORM_MINMAX, -1);
cv::cuda::resize(var_coarse, impl_->var_coarse.toGpuMat(), cv::Size(l.cols(), l.rows()));
cv::Mat tmp;
impl_->var_coarse.toGpuMat().download(tmp);
cv::imshow("Var", tmp);
impl_->cost_fine.generateFilterMask(params.wsize, 16);
impl_->cost_medium.setFilter(impl_->cost_fine.getFilter());
impl_->cost_coarse.setFilter(impl_->cost_fine.getFilter());
impl_->cost_fine.set(impl_->l, impl_->r);
impl_->cost_medium.set(medium_l, medium_r, l.cols(), l.rows());
impl_->cost_coarse.set(coarse_l, coarse_r, l.cols(), l.rows());
impl_->cost.set();
impl_->compute(disparity);
}
StereoHStableSgm::~StereoHStableSgm() {
if (impl_) {
delete impl_;
impl_ = nullptr;
}
}
|
9238c3272ea0b5fd3dd37113d82d71ff7f942434.hip | // !!! This is a file automatically generated by hipify!!!
//#include <stdlib.h>
//#include <string.h>
//#include <stdio.h>
#include "jim.h"
#include "jimautoconf.h"
#include "jim-subcmd.h"
static __device__ int history_cmd_getline(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
char *line = nullptr; //Jim_HistoryGetline(Jim_String(argv[0]));
#else
char *line = Jim_HistoryGetline(Jim_String(argv[0]));
#endif
// On EOF returns -1 if varName was specified; otherwise the empty string.
if (line == NULL) {
if (argc == 2)
Jim_SetResultInt(interp, -1);
return JIM_OK;
}
Jim_Obj *objPtr = Jim_NewStringObjNoAlloc(interp, line, -1);
// Returns the length of the string if varName was specified
if (argc == 2) {
if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) {
Jim_FreeNewObj(interp, objPtr);
return JIM_ERROR;
}
Jim_SetResultInt(interp, Jim_Length(objPtr));
}
else
Jim_SetResult(interp, objPtr);
return JIM_OK;
}
static __device__ int history_cmd_load(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
//Jim_HistoryLoad(Jim_String(argv[0]));
#else
Jim_HistoryLoad(Jim_String(argv[0]));
#endif
return JIM_OK;
}
static __device__ int history_cmd_save(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
//Jim_HistorySave(Jim_String(argv[0]));
#else
Jim_HistorySave(Jim_String(argv[0]));
#endif
return JIM_OK;
}
static __device__ int history_cmd_add(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
//Jim_HistoryAdd(Jim_String(argv[0]));
#else
Jim_HistoryAdd(Jim_String(argv[0]));
#endif
return JIM_OK;
}
static __device__ int history_cmd_show(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __HIPCC__
//Jim_HistoryShow();
#else
Jim_HistoryShow();
#endif
return JIM_OK;
}
__constant__ static const jim_subcmd_type _history_command_table[] = {
{ "getline", "prompt ?varname?", history_cmd_getline, 1, 2 }, // Description: Reads one line from the user. Similar to gets.
{ "load", "filename", history_cmd_load, 1, 1, }, // Description: Loads history from the given file, if possible
{ "save", "filename", history_cmd_save, 1, 1 }, // Description: Saves history to the given file
{ "add", "line", history_cmd_add, 1, 1 }, // Description: Adds the line to the history ands saves
{ "show", NULL, history_cmd_show, 0, 0 }, // Description: Displays the history
{ NULL }
};
static __device__ int JimHistorySubCmdProc(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return Jim_CallSubCmd(interp, Jim_ParseSubCmd(interp, _history_command_table, argc, argv), argc, argv);
}
static __device__ void JimHistoryDelProc(ClientData privData, Jim_Interp *interp)
{
Jim_Free(privData);
}
__device__ int Jim_historyInit(Jim_Interp *interp)
{
if (Jim_PackageProvide(interp, "history", "1.0", JIM_ERRMSG))
return JIM_ERROR;
void **history = (void **)Jim_Alloc(sizeof(*history));
*history = NULL;
Jim_CreateCommand(interp, "history", JimHistorySubCmdProc, history, JimHistoryDelProc);
return JIM_OK;
}
| 9238c3272ea0b5fd3dd37113d82d71ff7f942434.cu | //#include <stdlib.h>
//#include <string.h>
//#include <stdio.h>
#include "jim.h"
#include "jimautoconf.h"
#include "jim-subcmd.h"
static __device__ int history_cmd_getline(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
char *line = nullptr; //Jim_HistoryGetline(Jim_String(argv[0]));
#else
char *line = Jim_HistoryGetline(Jim_String(argv[0]));
#endif
// On EOF returns -1 if varName was specified; otherwise the empty string.
if (line == NULL) {
if (argc == 2)
Jim_SetResultInt(interp, -1);
return JIM_OK;
}
Jim_Obj *objPtr = Jim_NewStringObjNoAlloc(interp, line, -1);
// Returns the length of the string if varName was specified
if (argc == 2) {
if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) {
Jim_FreeNewObj(interp, objPtr);
return JIM_ERROR;
}
Jim_SetResultInt(interp, Jim_Length(objPtr));
}
else
Jim_SetResult(interp, objPtr);
return JIM_OK;
}
static __device__ int history_cmd_load(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
//Jim_HistoryLoad(Jim_String(argv[0]));
#else
Jim_HistoryLoad(Jim_String(argv[0]));
#endif
return JIM_OK;
}
static __device__ int history_cmd_save(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
//Jim_HistorySave(Jim_String(argv[0]));
#else
Jim_HistorySave(Jim_String(argv[0]));
#endif
return JIM_OK;
}
static __device__ int history_cmd_add(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
//Jim_HistoryAdd(Jim_String(argv[0]));
#else
Jim_HistoryAdd(Jim_String(argv[0]));
#endif
return JIM_OK;
}
static __device__ int history_cmd_show(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#if __CUDACC__
//Jim_HistoryShow();
#else
Jim_HistoryShow();
#endif
return JIM_OK;
}
__constant__ static const jim_subcmd_type _history_command_table[] = {
{ "getline", "prompt ?varname?", history_cmd_getline, 1, 2 }, // Description: Reads one line from the user. Similar to gets.
{ "load", "filename", history_cmd_load, 1, 1, }, // Description: Loads history from the given file, if possible
{ "save", "filename", history_cmd_save, 1, 1 }, // Description: Saves history to the given file
{ "add", "line", history_cmd_add, 1, 1 }, // Description: Adds the line to the history ands saves
{ "show", NULL, history_cmd_show, 0, 0 }, // Description: Displays the history
{ NULL }
};
static __device__ int JimHistorySubCmdProc(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return Jim_CallSubCmd(interp, Jim_ParseSubCmd(interp, _history_command_table, argc, argv), argc, argv);
}
static __device__ void JimHistoryDelProc(ClientData privData, Jim_Interp *interp)
{
Jim_Free(privData);
}
__device__ int Jim_historyInit(Jim_Interp *interp)
{
if (Jim_PackageProvide(interp, "history", "1.0", JIM_ERRMSG))
return JIM_ERROR;
void **history = (void **)Jim_Alloc(sizeof(*history));
*history = NULL;
Jim_CreateCommand(interp, "history", JimHistorySubCmdProc, history, JimHistoryDelProc);
return JIM_OK;
}
|
e34465063dc4d638ffbacd10a903753b2f4dab0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @Name: matrix_transfer_2d.cu
* @Description: 3D Matrix (NxMxZ) Floating-Point Transfer.
*
* @Author: Giacomo Marciani <[email protected]>
* @Institution: University of Rome Tor Vergata
*
* @Usage: matrix_transfer_2d matrixRows matrixCols matrixZ blockSize
*
* Default values:
* matrixRows: 4096
* matrixCols: 4096
* matrixZ: 4096
* blockSize: 32
*/
#include <stdio.h>
#include <math.h>
#include "../../common/error.h"
#include "../../common/random.h"
#include "../../common/matrix.h"
#ifdef DOUBLE
#define REAL double
#else
#define REAL float
#endif
__global__ void matrixCopy(REAL *a, REAL *b, const unsigned int matrixRows, const unsigned int matrixCols, const unsigned int matrixZ) {
const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
if (row >= matrixRows || col >= matrixCols || z >= matrixZ) return;
const unsigned int pos = (z * matrixRows * matrixCols) + (row * matrixCols) + col;
b[pos] = a[pos];
}
__host__ void gpuMatrixCopy(REAL ***a, REAL ***b, const unsigned int matrixRows, const unsigned int matrixCols, const unsigned int matrixZ, const dim3 gridDim, const dim3 blockDim) {
REAL *dev_a = NULL; // device copies of a, b
REAL *dev_b = NULL; // device copies of a, b
const size_t size = matrixRows * matrixCols * matrixZ * sizeof(REAL); // bytes for a, b
const size_t sizeX = matrixCols * sizeof(REAL); // bytes for a, b (dimension X)
unsigned int z, r; // indices
// allocate device copies of a, b
HANDLE_ERROR(hipMalloc((void**)&dev_a, size));
HANDLE_ERROR(hipMalloc((void**)&dev_b, size));
// copy inputs to device
for (z = 0; z < matrixZ; z++) {
for (r = 0; r < matrixRows; r++) {
HANDLE_ERROR(hipMemcpy((void*)(dev_a + (z * matrixRows * matrixCols) + (r * matrixCols)), (const void*)a[z][r], sizeX, hipMemcpyHostToDevice));
}
}
// launch kernel matrixCopy()
hipLaunchKernelGGL(( matrixCopy), dim3(gridDim), dim3(blockDim) , 0, 0, dev_a, dev_b, matrixRows, matrixCols, matrixZ);
// copy device result back to host copy of b
for (z = 0; z < matrixZ; z++) {
for (r = 0; r < matrixRows; r++) {
HANDLE_ERROR(hipMemcpy((void*)b[z][r], (const void*)(dev_b + (z * matrixRows * matrixCols) + (r * matrixCols)), sizeX, hipMemcpyDeviceToHost));
}
}
// free device
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
}
int main(const int argc, const char **argv) {
REAL ***a, ***b = NULL; // host copies of a, b
unsigned int sizeX, sizeY, sizeZ; // bytes for a, b
unsigned int matrixRows, matrixCols, matrixZ; // matrix dimensions
unsigned int gridSizeX, gridSizeY, gridSizeZ; // grid size
unsigned int blockSize; // block size
hipDeviceProp_t gpuInfo; // gpu properties
unsigned int r, c, z; // indices
// check arguments
if (argc < 5) {
fprintf(stderr, "Usage: %s matrixRows matrixCols matrixZ blockSize\n", argv[0]);
exit(1);
}
matrixRows = atoi(argv[1]);
matrixCols = atoi(argv[2]);
matrixZ = atoi(argv[3]);
blockSize = atoi(argv[4]);
if (matrixRows < 1) {
fprintf(stderr, "Error: matrixRows expected >= 1, got %d\n", matrixRows);
exit(1);
}
if (matrixCols < 1) {
fprintf(stderr, "Error: matrixCols expected >= 1, got %d\n", matrixCols);
exit(1);
}
if (matrixZ < 1) {
fprintf(stderr, "Error: matrixZ expected >= 1, got %d\n", matrixZ);
exit(1);
}
if (blockSize < 1) {
fprintf(stderr, "Error: blockSize expected >= 1, got %d\n", blockSize);
exit(1);
}
// grid settings
gridSizeX = matrixCols / blockSize;
if (gridSizeX * blockSize < matrixCols) {
gridSizeX += 1;
}
gridSizeY = matrixRows / blockSize;
if (gridSizeY * blockSize < matrixRows) {
gridSizeY += 1;
}
gridSizeZ = matrixZ / blockSize;
if (gridSizeZ * blockSize < matrixZ) {
gridSizeZ += 1;
}
dim3 gridDim(gridSizeX, gridSizeY, gridSizeZ);
dim3 blockDim(blockSize, blockSize, blockSize);
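// note: blockSize^3 must stay within the device limit on threads per block (typically 1024), so blockSize <= 10 in practice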
sizeZ = matrixZ * sizeof(REAL**);
sizeY = matrixRows * sizeof(REAL*);
sizeX = matrixCols * sizeof(REAL);
HANDLE_ERROR(hipGetDeviceProperties(&gpuInfo, 0));
printf("-----------------------------------------\n");
printf("3D Matrix (NxMxZ) Floating-Point Transfer\n");
printf("-----------------------------------------\n");
#ifdef DOUBLE
printf("FP Precision: Double\n");
#else
printf("FP Precision: Single\n");
#endif
printf("Matrix Dimension: (%d, %d, %d)\n", matrixRows, matrixCols, matrixZ);
printf("Grid Size: (%d, %d, %d) (max: (%d, %d, %d))\n",
gridDim.x, gridDim.y, gridDim.z,
gpuInfo.maxGridSize[0], gpuInfo.maxGridSize[1], gpuInfo.maxGridSize[2]);
printf("Block Size: (%d, %d, %d) (max: (%d, %d, %d))\n",
blockDim.x, blockDim.y, blockDim.z,
gpuInfo.maxThreadsDim[0], gpuInfo.maxThreadsDim[1], gpuInfo.maxThreadsDim[2]);
printf("-----------------------------------\n");
// allocate host copies of a, b
HANDLE_NULL(a = (REAL***)malloc(sizeZ));
for (z = 0; z < matrixZ; z++) {
HANDLE_NULL(a[z] = (REAL**)malloc(sizeY));
for (r = 0; r < matrixRows; r++) {
HANDLE_NULL(a[z][r] = (REAL*)malloc(sizeX));
}
}
HANDLE_NULL(b = (REAL***)malloc(sizeZ));
for (z = 0; z < matrixZ; z++) {
HANDLE_NULL(b[z] = (REAL**)malloc(sizeY));
for (r = 0; r < matrixRows; r++) {
HANDLE_NULL(b[z][r] = (REAL*)malloc(sizeX));
}
}
// fill a with random data
#ifdef DOUBLE
random_matrix_double_3d(a, matrixRows, matrixCols, matrixZ);
#else
random_matrix_float_3d(a, matrixRows, matrixCols, matrixZ);
#endif
// launch kernel matrixCopy()
gpuMatrixCopy(a, b, matrixRows, matrixCols, matrixZ, gridDim, blockDim);
// test result
bool err = false;
for (z = 0; z < matrixZ && !err; z++) {
for (r = 0; r < matrixRows && !err; r++) {
for (c = 0; c < matrixCols && !err; c++) {
if (a[z][r][c] != b[z][r][c]) {
err = true;
break;
}
}
}
}
if (err) {
fprintf(stderr, "Error\n");
} else {
printf("Correct!\n");
}
// free host
for (z = 0; z < matrixZ; z++) {
for (r = 0; r < matrixRows; r++) {
free(a[z][r]);
}
free(a[z]);
}
free(a);
for (z = 0; z < matrixZ; z++) {
for (r = 0; r < matrixRows; r++) {
free(b[z][r]);
}
free(b[z]);
}
free(b);
return 0;
}
| e34465063dc4d638ffbacd10a903753b2f4dab0c.cu | /*
* @Name: matrix_transfer_2d.cu
* @Description: 3D Matrix (NxMxZ) Floating-Point Transfer.
*
* @Author: Giacomo Marciani <[email protected]>
* @Institution: University of Rome Tor Vergata
*
* @Usage: matrix_transfer_2d matrixRows matrixCols matrixZ blockSize
*
* Default values:
* matrixRows: 4096
* matrixCols: 4096
* matrixZ: 4096
* blockSize: 32
*/
#include <stdio.h>
#include <math.h>
#include "../../common/error.h"
#include "../../common/random.h"
#include "../../common/matrix.h"
#ifdef DOUBLE
#define REAL double
#else
#define REAL float
#endif
__global__ void matrixCopy(REAL *a, REAL *b, const unsigned int matrixRows, const unsigned int matrixCols, const unsigned int matrixZ) {
const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
if (row >= matrixRows || col >= matrixCols || z >= matrixZ) return;
const unsigned int pos = (z * matrixRows * matrixCols) + (row * matrixCols) + col;
b[pos] = a[pos];
}
__host__ void gpuMatrixCopy(REAL ***a, REAL ***b, const unsigned int matrixRows, const unsigned int matrixCols, const unsigned int matrixZ, const dim3 gridDim, const dim3 blockDim) {
REAL *dev_a = NULL; // device copies of a, b
REAL *dev_b = NULL; // device copies of a, b
const size_t size = matrixRows * matrixCols * matrixZ * sizeof(REAL); // bytes for a, b
const size_t sizeX = matrixCols * sizeof(REAL); // bytes for a, b (dimension X)
unsigned int z, r; // indices
// allocate device copies of a, b
HANDLE_ERROR(cudaMalloc((void**)&dev_a, size));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, size));
// copy inputs to device
for (z = 0; z < matrixZ; z++) {
for (r = 0; r < matrixRows; r++) {
HANDLE_ERROR(cudaMemcpy((void*)(dev_a + (z * matrixRows * matrixCols) + (r * matrixCols)), (const void*)a[z][r], sizeX, cudaMemcpyHostToDevice));
}
}
// launch kernel matrixCopy()
matrixCopy<<< gridDim, blockDim >>>(dev_a, dev_b, matrixRows, matrixCols, matrixZ);
// copy device result back to host copy of b
for (z = 0; z < matrixZ; z++) {
for (r = 0; r < matrixRows; r++) {
HANDLE_ERROR(cudaMemcpy((void*)b[z][r], (const void*)(dev_b + (z * matrixRows * matrixCols) + (r * matrixCols)), sizeX, cudaMemcpyDeviceToHost));
}
}
// free device
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
}
int main(const int argc, const char **argv) {
REAL ***a, ***b = NULL; // host copies of a, b
unsigned int sizeX, sizeY, sizeZ; // bytes for a, b
unsigned int matrixRows, matrixCols, matrixZ; // matrix dimensions
unsigned int gridSizeX, gridSizeY, gridSizeZ; // grid size
unsigned int blockSize; // block size
cudaDeviceProp gpuInfo; // gpu properties
unsigned int r, c, z; // indices
// check arguments
if (argc < 5) {
fprintf(stderr, "Usage: %s matrixRows matrixCols matrixZ blockSize\n", argv[0]);
exit(1);
}
matrixRows = atoi(argv[1]);
matrixCols = atoi(argv[2]);
matrixZ = atoi(argv[3]);
blockSize = atoi(argv[4]);
if (matrixRows < 1) {
fprintf(stderr, "Error: matrixRows expected >= 1, got %d\n", matrixRows);
exit(1);
}
if (matrixCols < 1) {
fprintf(stderr, "Error: matrixCols expected >= 1, got %d\n", matrixCols);
exit(1);
}
if (matrixZ < 1) {
fprintf(stderr, "Error: matrixZ expected >= 1, got %d\n", matrixZ);
exit(1);
}
if (blockSize < 1) {
fprintf(stderr, "Error: blockSize expected >= 1, got %d\n", blockSize);
exit(1);
}
// grid settings
gridSizeX = matrixCols / blockSize;
if (gridSizeX * blockSize < matrixCols) {
gridSizeX += 1;
}
gridSizeY = matrixRows / blockSize;
if (gridSizeY * blockSize < matrixRows) {
gridSizeY += 1;
}
gridSizeZ = matrixZ / blockSize;
if (gridSizeZ * blockSize < matrixZ) {
gridSizeZ += 1;
}
dim3 gridDim(gridSizeX, gridSizeY, gridSizeZ);
dim3 blockDim(blockSize, blockSize, blockSize);
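  // Note: each block holds blockSize^3 threads, so blockSize must stay small enough to respect the per-block thread limits reported below.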
sizeZ = matrixZ * sizeof(REAL**);
sizeY = matrixRows * sizeof(REAL*);
sizeX = matrixCols * sizeof(REAL);
HANDLE_ERROR(cudaGetDeviceProperties(&gpuInfo, 0));
printf("-----------------------------------------\n");
printf("3D Matrix (NxMxZ) Floating-Point Transfer\n");
printf("-----------------------------------------\n");
#ifdef DOUBLE
printf("FP Precision: Double\n");
#else
printf("FP Precision: Single\n");
#endif
printf("Matrix Dimension: (%d, %d, %d)\n", matrixRows, matrixCols, matrixZ);
printf("Grid Size: (%d, %d, %d) (max: (%d, %d, %d))\n",
gridDim.x, gridDim.y, gridDim.z,
gpuInfo.maxGridSize[0], gpuInfo.maxGridSize[1], gpuInfo.maxGridSize[2]);
printf("Block Size: (%d, %d, %d) (max: (%d, %d, %d))\n",
blockDim.x, blockDim.y, blockDim.z,
gpuInfo.maxThreadsDim[0], gpuInfo.maxThreadsDim[1], gpuInfo.maxThreadsDim[2]);
printf("-----------------------------------\n");
// allocate host copies of a, b
HANDLE_NULL(a = (REAL***)malloc(sizeZ));
for (z = 0; z < matrixZ; z++) {
HANDLE_NULL(a[z] = (REAL**)malloc(sizeY));
for (r = 0; r < matrixRows; r++) {
HANDLE_NULL(a[z][r] = (REAL*)malloc(sizeX));
}
}
HANDLE_NULL(b = (REAL***)malloc(sizeZ));
for (z = 0; z < matrixZ; z++) {
HANDLE_NULL(b[z] = (REAL**)malloc(sizeY));
for (r = 0; r < matrixRows; r++) {
HANDLE_NULL(b[z][r] = (REAL*)malloc(sizeX));
}
}
// fill a with random data
#ifdef DOUBLE
random_matrix_double_3d(a, matrixRows, matrixCols, matrixZ);
#else
random_matrix_float_3d(a, matrixRows, matrixCols, matrixZ);
#endif
// launch kernel matrixCopy()
gpuMatrixCopy(a, b, matrixRows, matrixCols, matrixZ, gridDim, blockDim);
// test result
bool err = false;
for (z = 0; z < matrixZ && !err; z++) {
for (r = 0; r < matrixRows && !err; r++) {
for (c = 0; c < matrixCols && !err; c++) {
if (a[z][r][c] != b[z][r][c]) {
err = true;
break;
}
}
}
}
if (err) {
fprintf(stderr, "Error\n");
} else {
printf("Correct!\n");
}
// free host
for (z = 0; z < matrixZ; z++) {
for (r = 0; r < matrixRows; r++) {
free(a[z][r]);
}
free(a[z]);
}
free(a);
for (z = 0; z < matrixZ; z++) {
for (r = 0; r < matrixRows; r++) {
free(b[z][r]);
}
free(b[z]);
}
free(b);
return 0;
}
|
047be0236268f2267671c3a47c02f188b0cdb1c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf/filling.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream.hpp>
TEST(ExpectsTest, FalseCondition)
{
EXPECT_THROW(CUDF_EXPECTS(false, "condition is false"), cudf::logic_error);
}
TEST(ExpectsTest, TrueCondition) { EXPECT_NO_THROW(CUDF_EXPECTS(true, "condition is true")); }
TEST(ExpectsTest, TryCatch)
{
CUDF_EXPECT_THROW_MESSAGE(CUDF_EXPECTS(false, "test reason"), "test reason");
}
TEST(CudaTryTest, Error)
{
CUDA_EXPECT_THROW_MESSAGE(CUDF_CUDA_TRY(hipErrorLaunchFailure),
"hipErrorLaunchFailure unspecified launch failure");
}
TEST(CudaTryTest, Success) { EXPECT_NO_THROW(CUDF_CUDA_TRY(hipSuccess)); }
TEST(CudaTryTest, TryCatch)
{
CUDA_EXPECT_THROW_MESSAGE(CUDF_CUDA_TRY(hipErrorMemoryAllocation),
"hipErrorMemoryAllocation out of memory");
}
TEST(StreamCheck, success) { EXPECT_NO_THROW(CUDF_CHECK_CUDA(0)); }
namespace {
// Some silly kernel that will cause an error
void __global__ test_kernel(int* data) { data[threadIdx.x] = threadIdx.x; }
} // namespace
// In a release build and without explicit synchronization, CUDF_CHECK_CUDA may
// or may not fail on erroneous asynchronous CUDA calls. Invoke
// hipStreamSynchronize to guarantee failure on error. In a non-release build,
// CUDF_CHECK_CUDA deterministically fails on erroneous asynchronous CUDA
// calls.
TEST(StreamCheck, FailedKernel)
{
rmm::cuda_stream stream;
int a;
hipLaunchKernelGGL(( test_kernel), dim3(0), dim3(0), 0, stream.value(), &a);
#ifdef NDEBUG
stream.synchronize();
#endif
EXPECT_THROW(CUDF_CHECK_CUDA(stream.value()), cudf::cuda_error);
}
TEST(StreamCheck, CatchFailedKernel)
{
rmm::cuda_stream stream;
int a;
hipLaunchKernelGGL(( test_kernel), dim3(0), dim3(0), 0, stream.value(), &a);
#ifndef NDEBUG
stream.synchronize();
#endif
CUDA_EXPECT_THROW_MESSAGE(CUDF_CHECK_CUDA(stream.value()),
"hipErrorInvalidConfiguration "
"invalid configuration argument");
}
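// Minimal kernel for the fatal-error death test below; it is launched on a deliberately misaligned pointer.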
__global__ void kernel(int* p) { *p = 42; }
TEST(DeathTest, CudaFatalError)
{
testing::FLAGS_gtest_death_test_style = "threadsafe";
auto call_kernel = []() {
int* p;
hipMalloc(&p, 2 * sizeof(int));
int* misaligned = (int*)(reinterpret_cast<char*>(p) + 1);
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, misaligned);
try {
CUDF_CUDA_TRY(hipDeviceSynchronize());
} catch (const cudf::fatal_cuda_error& fe) {
std::abort();
}
};
ASSERT_DEATH(call_kernel(), "");
}
#ifndef NDEBUG
__global__ void assert_false_kernel() { cudf_assert(false && "this kernel should die"); }
__global__ void assert_true_kernel() { cudf_assert(true && "this kernel should live"); }
TEST(DebugAssertDeathTest, cudf_assert_false)
{
testing::FLAGS_gtest_death_test_style = "threadsafe";
auto call_kernel = []() {
hipLaunchKernelGGL(( assert_false_kernel), dim3(1), dim3(1), 0, 0, );
// Kernel should fail with `hipErrorAssert`
// This error invalidates the current device context, so we need to kill
// the current process. Running with EXPECT_DEATH spawns a new process for
// each attempted kernel launch
if (hipErrorAssert == hipDeviceSynchronize()) { std::abort(); }
// If we reach this point, the cudf_assert didn't work so we exit normally, which will cause
// EXPECT_DEATH to fail.
};
EXPECT_DEATH(call_kernel(), "this kernel should die");
}
TEST(DebugAssert, cudf_assert_true)
{
hipLaunchKernelGGL(( assert_true_kernel), dim3(1), dim3(1), 0, 0, );
ASSERT_EQ(hipSuccess, hipDeviceSynchronize());
}
#endif
// These tests don't use CUDF_TEST_PROGRAM_MAIN because :
// 1.) They don't need the RMM Pool
// 2.) The RMM Pool interferes with the death test
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 047be0236268f2267671c3a47c02f188b0cdb1c5.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf_test/base_fixture.hpp>
#include <cudf/filling.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream.hpp>
TEST(ExpectsTest, FalseCondition)
{
EXPECT_THROW(CUDF_EXPECTS(false, "condition is false"), cudf::logic_error);
}
TEST(ExpectsTest, TrueCondition) { EXPECT_NO_THROW(CUDF_EXPECTS(true, "condition is true")); }
TEST(ExpectsTest, TryCatch)
{
CUDF_EXPECT_THROW_MESSAGE(CUDF_EXPECTS(false, "test reason"), "test reason");
}
TEST(CudaTryTest, Error)
{
CUDA_EXPECT_THROW_MESSAGE(CUDF_CUDA_TRY(cudaErrorLaunchFailure),
"cudaErrorLaunchFailure unspecified launch failure");
}
TEST(CudaTryTest, Success) { EXPECT_NO_THROW(CUDF_CUDA_TRY(cudaSuccess)); }
TEST(CudaTryTest, TryCatch)
{
CUDA_EXPECT_THROW_MESSAGE(CUDF_CUDA_TRY(cudaErrorMemoryAllocation),
"cudaErrorMemoryAllocation out of memory");
}
TEST(StreamCheck, success) { EXPECT_NO_THROW(CUDF_CHECK_CUDA(0)); }
namespace {
// Some silly kernel that will cause an error
void __global__ test_kernel(int* data) { data[threadIdx.x] = threadIdx.x; }
} // namespace
// In a release build and without explicit synchronization, CUDF_CHECK_CUDA may
// or may not fail on erroneous asynchronous CUDA calls. Invoke
// cudaStreamSynchronize to guarantee failure on error. In a non-release build,
// CUDF_CHECK_CUDA deterministically fails on erroneous asynchronous CUDA
// calls.
TEST(StreamCheck, FailedKernel)
{
rmm::cuda_stream stream;
int a;
test_kernel<<<0, 0, 0, stream.value()>>>(&a);
#ifdef NDEBUG
stream.synchronize();
#endif
EXPECT_THROW(CUDF_CHECK_CUDA(stream.value()), cudf::cuda_error);
}
TEST(StreamCheck, CatchFailedKernel)
{
rmm::cuda_stream stream;
int a;
test_kernel<<<0, 0, 0, stream.value()>>>(&a);
#ifndef NDEBUG
stream.synchronize();
#endif
CUDA_EXPECT_THROW_MESSAGE(CUDF_CHECK_CUDA(stream.value()),
"cudaErrorInvalidConfiguration "
"invalid configuration argument");
}
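// Minimal kernel for the fatal-error death test below; it is launched on a deliberately misaligned pointer.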
__global__ void kernel(int* p) { *p = 42; }
TEST(DeathTest, CudaFatalError)
{
testing::FLAGS_gtest_death_test_style = "threadsafe";
auto call_kernel = []() {
int* p;
cudaMalloc(&p, 2 * sizeof(int));
int* misaligned = (int*)(reinterpret_cast<char*>(p) + 1);
kernel<<<1, 1>>>(misaligned);
try {
CUDF_CUDA_TRY(cudaDeviceSynchronize());
} catch (const cudf::fatal_cuda_error& fe) {
std::abort();
}
};
ASSERT_DEATH(call_kernel(), "");
}
#ifndef NDEBUG
__global__ void assert_false_kernel() { cudf_assert(false && "this kernel should die"); }
__global__ void assert_true_kernel() { cudf_assert(true && "this kernel should live"); }
TEST(DebugAssertDeathTest, cudf_assert_false)
{
testing::FLAGS_gtest_death_test_style = "threadsafe";
auto call_kernel = []() {
assert_false_kernel<<<1, 1>>>();
// Kernel should fail with `cudaErrorAssert`
// This error invalidates the current device context, so we need to kill
// the current process. Running with EXPECT_DEATH spawns a new process for
// each attempted kernel launch
if (cudaErrorAssert == cudaDeviceSynchronize()) { std::abort(); }
// If we reach this point, the cudf_assert didn't work so we exit normally, which will cause
// EXPECT_DEATH to fail.
};
EXPECT_DEATH(call_kernel(), "this kernel should die");
}
TEST(DebugAssert, cudf_assert_true)
{
assert_true_kernel<<<1, 1>>>();
ASSERT_EQ(cudaSuccess, cudaDeviceSynchronize());
}
#endif
// These tests don't use CUDF_TEST_PROGRAM_MAIN because :
// 1.) They don't need the RMM Pool
// 2.) The RMM Pool interferes with the death test
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
ddb5a537af1e68a5e84e06a268d4e0db0181a5fc.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel_maxmin.cu"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
#define RANGE 2048
void print_vector(int *vector, int num);
int main(int argc, char **argv)
{
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
if (argc == 3) {
tmpchar = argv[1]; //graph inputfile
file_format = atoi(argv[2]); //graph format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
srand(7);
// Allocate the CSR structure
csr_array *csr;
// Parse graph file and store into a CSR format
if (file_format == 1)
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
else if (file_format == 0)
csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
else {
printf("reserve for future");
exit(1);
}
// Allocate the vertex value array
int *node_value = (int *)malloc(num_nodes * sizeof(int));
if (!node_value) fprintf(stderr, "node_value malloc failed\n");
// Allocate the color array
int *color = (int *)malloc(num_nodes * sizeof(int));
if (!color) fprintf(stderr, "color malloc failed\n");
// Initialize all the colors to -1
// Randomize the value for each vertex
for (int i = 0; i < num_nodes; i++) {
color[i] = -1;
node_value[i] = rand() % RANGE;
}
int *row_d;
int *col_d;
int *max_d;
int *min_d;
int *color_d;
int *node_value_d;
int *stop_d;
// Create device-side buffers for the graph
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d): %s\n", num_edges , hipGetErrorString(err));
return -1;
}
// Termination variable
err = hipMalloc(&stop_d, sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc stop_d (size:%d) => %s\n", 1 , hipGetErrorString(err));
return -1;
}
// Create device-side buffers for color
err = hipMalloc(&color_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc color_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&node_value_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc node_value_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&max_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc max_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&min_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc min_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
// Copy data to device-side buffers
double timer1 = gettime();
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
err = hipMemcpy(color_d, color, num_nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy color_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy row_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(node_value_d, node_value, num_nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy node_value_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
// Set up kernel dimensions
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
int stop = 1;
int graph_color = 1;
// Initialize arrays
hipLaunchKernelGGL(( ini), dim3(grid), dim3(threads) , 0, 0, max_d, min_d, num_nodes);
// Main computation loop
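    // Each pass launches color1 and color2 and advances graph_color by 2; the loop repeats until the kernels stop raising the termination flag in stop_d.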
double timer3 = gettime();
while (stop) {
stop = 0;
// Copy the termination variable to the device
err = hipMemcpy(stop_d, &stop, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: write stop_d: %s\n", hipGetErrorString(err));
}
// Launch the color kernel 1
hipLaunchKernelGGL(( color1) , dim3(grid), dim3(threads) , 0, 0, row_d, col_d, node_value_d, color_d,
stop_d, max_d, min_d, graph_color,
num_nodes, num_edges);
// Launch the color kernel 2
hipLaunchKernelGGL(( color2) , dim3(grid), dim3(threads) , 0, 0, node_value_d, color_d, max_d, min_d,
graph_color, num_nodes, num_edges);
err = hipMemcpy(&stop, stop_d, sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: read stop_d: %s\n", hipGetErrorString(err));
}
// Update the color label for the next iter
graph_color = graph_color + 2;
}
hipDeviceSynchronize();
double timer4 = gettime();
// Copy back the color array
err = hipMemcpy(color, color_d, num_nodes * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("ERROR: hipMemcpy(): %s\n", hipGetErrorString(err));
return -1;
}
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
double timer2 = gettime();
// Print out color and timing statistics
printf("total number of colors used: %d\n", graph_color);
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Dump the color array into an output file
print_vector(color, num_nodes);
#endif
// Free host-side buffers
free(node_value);
free(color);
csr->freeArrays();
free(csr);
// Free CUDA buffers
hipFree(row_d);
hipFree(col_d);
hipFree(max_d);
hipFree(color_d);
hipFree(node_value_d);
hipFree(stop_d);
return 0;
}
void print_vector(int *vector, int num)
{
FILE * fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }
for (int i = 0; i < num; i++)
fprintf(fp, "%d: %d\n", i + 1, vector[i]);
fclose(fp);
}
| ddb5a537af1e68a5e84e06a268d4e0db0181a5fc.cu | /************************************************************************************\
* *
* Copyright © 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel_maxmin.cu"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
#define RANGE 2048
void print_vector(int *vector, int num);
int main(int argc, char **argv)
{
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
if (argc == 3) {
tmpchar = argv[1]; //graph inputfile
file_format = atoi(argv[2]); //graph format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
srand(7);
// Allocate the CSR structure
csr_array *csr;
// Parse graph file and store into a CSR format
if (file_format == 1)
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
else if (file_format == 0)
csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
else {
printf("reserve for future");
exit(1);
}
// Allocate the vertex value array
int *node_value = (int *)malloc(num_nodes * sizeof(int));
if (!node_value) fprintf(stderr, "node_value malloc failed\n");
// Allocate the color array
int *color = (int *)malloc(num_nodes * sizeof(int));
if (!color) fprintf(stderr, "color malloc failed\n");
// Initialize all the colors to -1
// Randomize the value for each vertex
for (int i = 0; i < num_nodes; i++) {
color[i] = -1;
node_value[i] = rand() % RANGE;
}
int *row_d;
int *col_d;
int *max_d;
int *min_d;
int *color_d;
int *node_value_d;
int *stop_d;
// Create device-side buffers for the graph
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d): %s\n", num_edges , cudaGetErrorString(err));
return -1;
}
// Termination variable
err = cudaMalloc(&stop_d, sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc stop_d (size:%d) => %s\n", 1 , cudaGetErrorString(err));
return -1;
}
// Create device-side buffers for color
err = cudaMalloc(&color_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc color_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&node_value_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc node_value_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&max_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc max_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&min_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc min_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
// Copy data to device-side buffers
double timer1 = gettime();
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
err = cudaMemcpy(color_d, color, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy color_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(node_value_d, node_value, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy node_value_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
// Set up kernel dimensions
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
int stop = 1;
int graph_color = 1;
// Initialize arrays
ini<<< grid, threads >>>(max_d, min_d, num_nodes);
// Main computation loop
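    // Each pass launches color1 and color2 and advances graph_color by 2; the loop repeats until the kernels stop raising the termination flag in stop_d.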
double timer3 = gettime();
while (stop) {
stop = 0;
// Copy the termination variable to the device
err = cudaMemcpy(stop_d, &stop, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: write stop_d: %s\n", cudaGetErrorString(err));
}
// Launch the color kernel 1
color1 <<< grid, threads >>>(row_d, col_d, node_value_d, color_d,
stop_d, max_d, min_d, graph_color,
num_nodes, num_edges);
// Launch the color kernel 2
color2 <<< grid, threads >>>(node_value_d, color_d, max_d, min_d,
graph_color, num_nodes, num_edges);
err = cudaMemcpy(&stop, stop_d, sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: read stop_d: %s\n", cudaGetErrorString(err));
}
// Update the color label for the next iter
graph_color = graph_color + 2;
}
cudaThreadSynchronize();
double timer4 = gettime();
// Copy back the color array
err = cudaMemcpy(color, color_d, num_nodes * sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("ERROR: cudaMemcpy(): %s\n", cudaGetErrorString(err));
return -1;
}
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
double timer2 = gettime();
// Print out color and timing statistics
printf("total number of colors used: %d\n", graph_color);
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Dump the color array into an output file
print_vector(color, num_nodes);
#endif
// Free host-side buffers
free(node_value);
free(color);
csr->freeArrays();
free(csr);
// Free CUDA buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(max_d);
cudaFree(color_d);
cudaFree(node_value_d);
cudaFree(stop_d);
return 0;
}
void print_vector(int *vector, int num)
{
FILE * fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }
for (int i = 0; i < num; i++)
fprintf(fp, "%d: %d\n", i + 1, vector[i]);
fclose(fp);
}
|
d80ca3f228df6c25d827ecbf30a0dad75581875b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
void genRandomString(char *str,int length)
{
for(int i=0;i<length-1;++i)
{
str[i] = 'a' + rand()%26;
}
str[length-1] = '\0';
}
void genRandomSubString(char *str,int length,int sub_len)
{
for(int i=0;i<length-1;++i)
{
if(i>0 && ((i+1)%sub_len==0)) str[i] = '\0';
else str[i] = 'a' + rand()%26;
}
str[length-1] = '\0';
}
/*__global__ void my_strstr(char *str,char *sub_string,char ** position,int str_len,int sub_len,int num_sub)
{
int id = threadIdx.x;
char *sub = &sub_string[id*sub_len];
char *string = str;
char *a,*b;
b = sub;
printf("in GPU string is %s sub is %s\n",string,b);
for(;*string != '\0';++string){
a = string;
if(*a == *b){
printf("thread %d find a possible sub %s\n",id,b);
while(*(++a) == *(++b)){
printf("thread %d find a more and more possible sub %s\n",id,b);
if(*(b+1) == '\0'){
printf("thread %d find a sub %s\n",id,b);
position[id] = string;
printf("sting match in %s\n",position[id]);
}
}
}
b = sub;
}
}*/
char * my_strstr(char *str,char *sub,int str_len,int sub_len)
{
if(str_len < sub_len) return NULL;
if(str_len != 0 && sub_len == 0) return NULL;
if(str_len == 0 && sub_len == 0) return NULL;
int m, n;
for(int i=0;i<str_len;++i){
m = 0;
n = i;
if(str[n]==sub[m]){
while(str[++n] == sub[++m]){
if(sub[m+1] == '\0') return str+i;
}
}
}
return NULL;
}
__global__ void my_strstr(char *str,char *sub_string,char ** position,int str_len,int sub_len,int num_sub)
{
int id = threadIdx.x;
//char *sub = &sub_string[id*sub_len];
char *result = NULL;
char sub[24];
    // load the substring into registers: a big improvement over re-reading it from global memory
for(int i=0;i<sub_len;++i){
sub[i] = sub_string[id*sub_len+i];}
    // best-performing variant: stage the haystack string in shared memory
    extern __shared__ char s_string[];
    // how many values each thread has to copy from global memory into shared memory
int each_num = str_len/blockDim.x;
for(int i=0;i<each_num;++i){
s_string[i*blockDim.x+id] = str[i*blockDim.x+id];}
if( ((each_num*blockDim.x+id) < str_len) && (blockDim.x > each_num) )
       s_string[each_num*blockDim.x+id] = str[each_num*blockDim.x+id];
    // make sure the whole haystack is staged in shared memory before any thread starts scanning it
    __syncthreads();
char *string = s_string;
char *a,*b;
    // b points to the substring copy held in registers rather than in global memory
    b = sub;
    // keep looping while result == NULL instead of using goto or break inside the loop, which would hurt the computation
for(int i = 0;(*string != '\0')&&(result == NULL);i++){
//printf("i am %d\n",id);
a = string;
while(*a++ == *b++){
if(*(b+1) == '\0'){
result = string;
}
}
b = sub;
++string;
}
//coalesced global memory store, no effect since we only store once
position[id] = result;
}
int main()
{
int LENGTH = 4096, len = 24, num_sub = 100;
int num_block,num_thread;
if(num_sub < 512){
num_block = 1;
num_thread = num_sub;
}
else{
num_block = num_sub / 512;
num_thread = 512;
}
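    // One thread handles one substring; the kernel indexes by threadIdx.x only, so the search is effectively limited to a single block of up to 512 substrings.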
char haystack[LENGTH];
char subs[num_sub*len];
char *position[num_sub];
char *h_position[num_sub];
genRandomString(haystack,LENGTH);
genRandomSubString(subs,len*num_sub,len);
char *d_string,*d_subs;
char **d_position;
hipMalloc((void**)&d_string,sizeof(char)*LENGTH);
hipMalloc((void**)&d_subs,sizeof(char)*num_sub*len);
hipMalloc((void***)&d_position,sizeof(char*)*num_sub);
hipMemset(d_position,0,sizeof(char*)*num_sub);
memset(h_position,0,sizeof(char*)*num_sub);
const size_t smem = sizeof(char)*LENGTH;
char h_subs[num_sub][len];
for(int i=0;i<num_sub;++i){
for(int j=0;j<len;++j){
h_subs[i][j] = subs[i*len+j];
}
}
/*CPU*/
char *ret;
struct timeval start,end;
gettimeofday(&start, NULL );
for(int i=0;i<num_sub;++i)
{
ret = my_strstr(haystack,h_subs[i],LENGTH,len);
if(ret != NULL){
printf("find one sub string in %d sub\n",i);
printf("%s\n",ret);
}
position[i] = ret;
}
gettimeofday(&end, NULL );
float timeuse =1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("CPU time=%f\n",timeuse /1000.0);
/*GPU*/
gettimeofday(&start, NULL );
for(int i=0;i<50;++i)
{
hipMemcpy(d_string,haystack,sizeof(char)*LENGTH,hipMemcpyHostToDevice);
hipMemcpy(d_subs,subs,sizeof(char)*num_sub*len,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( my_strstr), dim3(num_block),dim3(num_thread),smem, 0, d_string,d_subs,d_position,LENGTH,len,num_sub);
hipDeviceSynchronize();
hipMemcpy(h_position,d_position,sizeof(char*)*num_sub,hipMemcpyDeviceToHost);
}
gettimeofday(&end, NULL );
timeuse =1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("GPU time=%f\n",timeuse /1000.0/50);
/*check*/
    // for small sizes the GPU result matches the CPU result
/*for(int i=0;i<num_sub;++i){
if(h_position[i] == position[i]){
printf("ok in %d sub\n",i);
if(position[i] != NULL){
printf("%s\n",position[i]);
}
}
else{
printf("error !!!!!!");
if(position[i] != NULL){
printf("CPU find match %s\n",position[i]);}
//because h_position[i] point to the address in GPU , causing segment error
if(h_position[i] != NULL){
printf("GPU find match %s\n",h_position[i]);}
}
}*/
return(0);
}
| d80ca3f228df6c25d827ecbf30a0dad75581875b.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
void genRandomString(char *str,int length)
{
for(int i=0;i<length-1;++i)
{
str[i] = 'a' + rand()%26;
}
str[length-1] = '\0';
}
void genRandomSubString(char *str,int length,int sub_len)
{
for(int i=0;i<length-1;++i)
{
if(i>0 && ((i+1)%sub_len==0)) str[i] = '\0';
else str[i] = 'a' + rand()%26;
}
str[length-1] = '\0';
}
/*__global__ void my_strstr(char *str,char *sub_string,char ** position,int str_len,int sub_len,int num_sub)
{
int id = threadIdx.x;
char *sub = &sub_string[id*sub_len];
char *string = str;
char *a,*b;
b = sub;
printf("in GPU string is %s sub is %s\n",string,b);
for(;*string != '\0';++string){
a = string;
if(*a == *b){
printf("thread %d find a possible sub %s\n",id,b);
while(*(++a) == *(++b)){
printf("thread %d find a more and more possible sub %s\n",id,b);
if(*(b+1) == '\0'){
printf("thread %d find a sub %s\n",id,b);
position[id] = string;
printf("sting match in %s\n",position[id]);
}
}
}
b = sub;
}
}*/
char * my_strstr(char *str,char *sub,int str_len,int sub_len)
{
if(str_len < sub_len) return NULL;
if(str_len != 0 && sub_len == 0) return NULL;
if(str_len == 0 && sub_len == 0) return NULL;
int m, n;
for(int i=0;i<str_len;++i){
m = 0;
n = i;
if(str[n]==sub[m]){
while(str[++n] == sub[++m]){
if(sub[m+1] == '\0') return str+i;
}
}
}
return NULL;
}
__global__ void my_strstr(char *str,char *sub_string,char ** position,int str_len,int sub_len,int num_sub)
{
int id = threadIdx.x;
//char *sub = &sub_string[id*sub_len];
char *result = NULL;
char sub[24];
    // load the substring into registers: a big improvement over re-reading it from global memory
for(int i=0;i<sub_len;++i){
sub[i] = sub_string[id*sub_len+i];}
    // best-performing variant: stage the haystack string in shared memory
    extern __shared__ char s_string[];
    // how many values each thread has to copy from global memory into shared memory
int each_num = str_len/blockDim.x;
for(int i=0;i<each_num;++i){
s_string[i*blockDim.x+id] = str[i*blockDim.x+id];}
if( ((each_num*blockDim.x+id) < str_len) && (blockDim.x > each_num) )
       s_string[each_num*blockDim.x+id] = str[each_num*blockDim.x+id];
    // make sure the whole haystack is staged in shared memory before any thread starts scanning it
    __syncthreads();
char *string = s_string;
char *a,*b;
    // b points to the substring copy held in registers rather than in global memory
    b = sub;
    // keep looping while result == NULL instead of using goto or break inside the loop, which would hurt the computation
for(int i = 0;(*string != '\0')&&(result == NULL);i++){
//printf("i am %d\n",id);
a = string;
while(*a++ == *b++){
if(*(b+1) == '\0'){
result = string;
}
}
b = sub;
++string;
}
//coalesced global memory store, no effect since we only store once
position[id] = result;
}
int main()
{
int LENGTH = 4096, len = 24, num_sub = 100;
int num_block,num_thread;
if(num_sub < 512){
num_block = 1;
num_thread = num_sub;
}
else{
num_block = num_sub / 512;
num_thread = 512;
}
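    // One thread handles one substring; the kernel indexes by threadIdx.x only, so the search is effectively limited to a single block of up to 512 substrings.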
char haystack[LENGTH];
char subs[num_sub*len];
char *position[num_sub];
char *h_position[num_sub];
genRandomString(haystack,LENGTH);
genRandomSubString(subs,len*num_sub,len);
char *d_string,*d_subs;
char **d_position;
cudaMalloc((void**)&d_string,sizeof(char)*LENGTH);
cudaMalloc((void**)&d_subs,sizeof(char)*num_sub*len);
cudaMalloc((void***)&d_position,sizeof(char*)*num_sub);
cudaMemset(d_position,0,sizeof(char*)*num_sub);
memset(h_position,0,sizeof(char*)*num_sub);
const size_t smem = sizeof(char)*LENGTH;
char h_subs[num_sub][len];
for(int i=0;i<num_sub;++i){
for(int j=0;j<len;++j){
h_subs[i][j] = subs[i*len+j];
}
}
/*CPU*/
char *ret;
struct timeval start,end;
gettimeofday(&start, NULL );
for(int i=0;i<num_sub;++i)
{
ret = my_strstr(haystack,h_subs[i],LENGTH,len);
if(ret != NULL){
printf("find one sub string in %d sub\n",i);
printf("%s\n",ret);
}
position[i] = ret;
}
gettimeofday(&end, NULL );
float timeuse =1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("CPU time=%f\n",timeuse /1000.0);
/*GPU*/
gettimeofday(&start, NULL );
for(int i=0;i<50;++i)
{
cudaMemcpy(d_string,haystack,sizeof(char)*LENGTH,cudaMemcpyHostToDevice);
cudaMemcpy(d_subs,subs,sizeof(char)*num_sub*len,cudaMemcpyHostToDevice);
my_strstr<<<num_block,num_thread,smem>>>(d_string,d_subs,d_position,LENGTH,len,num_sub);
cudaDeviceSynchronize();
cudaMemcpy(h_position,d_position,sizeof(char*)*num_sub,cudaMemcpyDeviceToHost);
}
gettimeofday(&end, NULL );
timeuse =1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("GPU time=%f\n",timeuse /1000.0/50);
/*check*/
    // for small sizes the GPU result matches the CPU result
/*for(int i=0;i<num_sub;++i){
if(h_position[i] == position[i]){
printf("ok in %d sub\n",i);
if(position[i] != NULL){
printf("%s\n",position[i]);
}
}
else{
printf("error !!!!!!");
if(position[i] != NULL){
printf("CPU find match %s\n",position[i]);}
//because h_position[i] point to the address in GPU , causing segment error
if(h_position[i] != NULL){
printf("GPU find match %s\n",h_position[i]);}
}
}*/
return(0);
}
|
925f6765e3c373abb52bf88da8e3e3e72aa0af32.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
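// CPU reference implementation of the LIF neuron update, used at the end of main() to validate the GPU results.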
void reference (
int numNeurons, int neurons_per_item, float dt,
float*__restrict__ encode_result,
float*__restrict__ voltage_array,
float*__restrict__ reftime_array,
float tau_rc, float tau_ref,
float*__restrict__ bias,
float*__restrict__ gain,
float*__restrict__ spikes)
{
for (int i = 0; i < numNeurons; i++)
{
int neuron_index = i % neurons_per_item;
int item_index = (int)(i / neurons_per_item);
float voltage = voltage_array[i];
float ref_time = reftime_array[i];
float current = bias[neuron_index] + gain[neuron_index] * encode_result[item_index];
float dV, spike, mult;
dV = -expm1f(-dt / tau_rc) * (current - voltage);
voltage = fmaxf(voltage + dV, 0.f);
ref_time -= dt;
mult = ref_time;
mult *= -1.f / dt;
mult += 1.f;
mult = mult > 1.f ? 1.f : mult;
mult = mult < 0.f ? 0.f : mult;
voltage *= mult;
//printf("%d voltage = %f\n", i, voltage);
if(voltage > 1.f){
spike = 1.f / dt;
ref_time = tau_ref + dt * (1.f - (voltage - 1.f) / dV);
voltage = 0.f;
}else{
spike = 0.f;
}
reftime_array[i] = ref_time;
voltage_array[i] = voltage;
spikes[i] = spike;
}
}
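// GPU LIF update, one thread per neuron: integrate the input current with time constant tau_rc, scale the voltage by the refractory window, and emit a spike of amplitude 1/dt when the voltage crosses the threshold of 1.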
__global__ void lif (
int numNeurons, int neurons_per_item, float dt,
const float*__restrict__ encode_result,
float*__restrict__ voltage_array,
float*__restrict__ reftime_array,
float tau_rc, float tau_ref,
const float*__restrict__ bias,
const float*__restrict__ gain,
float*__restrict__ spikes)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < numNeurons)
{
int neuron_index = i % neurons_per_item;
int item_index = (int)(i / neurons_per_item);
float voltage = voltage_array[i];
float ref_time = reftime_array[i];
float current = bias[neuron_index] + gain[neuron_index] * encode_result[item_index];
float dV, spike, mult;
dV = -expm1f(-dt / tau_rc) * (current - voltage);
voltage = fmaxf(voltage + dV, 0.f);
ref_time -= dt;
mult = ref_time;
mult *= -1.f / dt;
mult += 1.f;
mult = mult > 1.f ? 1.f : mult;
mult = mult < 0.f ? 0.f : mult;
voltage *= mult;
if(voltage > 1.f){
spike = 1.f / dt;
ref_time = tau_ref + dt * (1.f - (voltage - 1.f) / dV);
voltage = 0.f;
}else{
spike = 0.f;
}
reftime_array[i] = ref_time;
voltage_array[i] = voltage;
spikes[i] = spike;
}
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: %s <neurons per item> <num_items> <num_steps>\n", argv[0]);
return 1;
}
const int neurons_per_item = atoi(argv[1]);
const int num_items = atoi(argv[2]);
const int num_steps = atoi(argv[3]);
const int num_neurons = neurons_per_item * num_items;
const size_t neurons_size = num_neurons * sizeof(float);
const size_t items_size = num_items * sizeof(float);
const size_t neurons_per_item_size = neurons_per_item * sizeof(float);
float dt = 0.1; // time step
float tau_rc = 10; // membrane time constant
  float tau_ref = 2;  // refractory time
float* encode_result = (float*) malloc (items_size);
float* bias = (float*) malloc (neurons_per_item_size);
float* gain = (float*) malloc (neurons_per_item_size);
// test
float* voltage = (float*) malloc (neurons_size);
float* reftime = (float*) malloc (neurons_size);
float* spikes = (float*) malloc (neurons_size);;
// expected
float* voltage_gold = (float*) malloc (neurons_size);
float* reftime_gold = (float*) malloc (neurons_size);
float* spikes_gold = (float*) malloc (neurons_size);;
srand(123);
for (int i = 0; i < num_items; i++) {
encode_result[i] = rand() / (float)RAND_MAX;
}
for (int i = 0; i < num_neurons; i++) {
voltage_gold[i] = voltage[i] = 1.f + rand() / (float)RAND_MAX;
reftime_gold[i] = reftime[i] = rand() % 5 / 10.f;
}
for (int i = 0; i < neurons_per_item; i++) {
bias[i] = rand() / (float)RAND_MAX;
gain[i] = rand() / (float)RAND_MAX + 0.5f;
}
float* d_encode_result;
float* d_bias;
float* d_gain;
hipMalloc((void**)&d_encode_result, items_size);
hipMalloc((void**)&d_bias, neurons_per_item_size);
hipMalloc((void**)&d_gain, neurons_per_item_size);
// test
float* d_voltage;
float* d_reftime;
float* d_spikes;
hipMalloc((void**)&d_voltage, neurons_size);
hipMalloc((void**)&d_reftime, neurons_size);
hipMalloc((void**)&d_spikes, neurons_size);
hipMemcpy(d_encode_result, encode_result, items_size, hipMemcpyHostToDevice);
hipMemcpy(d_bias, bias, neurons_per_item_size, hipMemcpyHostToDevice);
hipMemcpy(d_gain, gain, neurons_per_item_size, hipMemcpyHostToDevice);
hipMemcpy(d_voltage, voltage, neurons_size, hipMemcpyHostToDevice);
hipMemcpy(d_reftime, reftime, neurons_size, hipMemcpyHostToDevice);
dim3 blocks (256);
dim3 grids ((num_neurons + 255) / 256);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int step = 0; step < num_steps; step++) {
hipLaunchKernelGGL(( lif), dim3(grids), dim3(blocks), 0, 0,
num_neurons,
neurons_per_item,
dt,
d_encode_result,
d_voltage,
d_reftime,
tau_rc,
tau_ref,
d_bias,
d_gain,
d_spikes);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto elapsed_time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (elapsed_time * 1e-3) / num_steps);
hipMemcpy(spikes, d_spikes, neurons_size, hipMemcpyDeviceToHost);
hipMemcpy(voltage, d_voltage, neurons_size, hipMemcpyDeviceToHost);
hipMemcpy(reftime, d_reftime, neurons_size, hipMemcpyDeviceToHost);
for(int step = 0; step < num_steps; step++) {
reference(num_neurons,
neurons_per_item,
dt,
encode_result,
voltage_gold,
reftime_gold,
tau_rc,
tau_ref,
bias,
gain,
spikes_gold);
}
bool ok = true;
for (int i = 0; i < num_neurons; i++) {
if (fabsf(spikes[i] - spikes_gold[i]) > 1e-3) {
printf("@%d: %f %f\n", i, spikes[i], spikes_gold[i]);
ok = false;
break;
}
}
free(encode_result);
free(voltage);
free(voltage_gold);
free(reftime);
free(reftime_gold);
free(bias);
free(gain);
free(spikes);
free(spikes_gold);
hipFree(d_encode_result);
hipFree(d_voltage);
hipFree(d_reftime);
hipFree(d_bias);
hipFree(d_gain);
hipFree(d_spikes);
printf("%s\n", ok ? "PASS" : "FAIL");
return 0;
}
| 925f6765e3c373abb52bf88da8e3e3e72aa0af32.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <cuda.h>
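// CPU reference implementation of the LIF neuron update, used at the end of main() to validate the GPU results.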
void reference (
int numNeurons, int neurons_per_item, float dt,
float*__restrict__ encode_result,
float*__restrict__ voltage_array,
float*__restrict__ reftime_array,
float tau_rc, float tau_ref,
float*__restrict__ bias,
float*__restrict__ gain,
float*__restrict__ spikes)
{
for (int i = 0; i < numNeurons; i++)
{
int neuron_index = i % neurons_per_item;
int item_index = (int)(i / neurons_per_item);
float voltage = voltage_array[i];
float ref_time = reftime_array[i];
float current = bias[neuron_index] + gain[neuron_index] * encode_result[item_index];
float dV, spike, mult;
dV = -expm1f(-dt / tau_rc) * (current - voltage);
voltage = fmaxf(voltage + dV, 0.f);
ref_time -= dt;
mult = ref_time;
mult *= -1.f / dt;
mult += 1.f;
mult = mult > 1.f ? 1.f : mult;
mult = mult < 0.f ? 0.f : mult;
voltage *= mult;
//printf("%d voltage = %f\n", i, voltage);
if(voltage > 1.f){
spike = 1.f / dt;
ref_time = tau_ref + dt * (1.f - (voltage - 1.f) / dV);
voltage = 0.f;
}else{
spike = 0.f;
}
reftime_array[i] = ref_time;
voltage_array[i] = voltage;
spikes[i] = spike;
}
}
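// GPU LIF update, one thread per neuron: integrate the input current with time constant tau_rc, scale the voltage by the refractory window, and emit a spike of amplitude 1/dt when the voltage crosses the threshold of 1.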
__global__ void lif (
int numNeurons, int neurons_per_item, float dt,
const float*__restrict__ encode_result,
float*__restrict__ voltage_array,
float*__restrict__ reftime_array,
float tau_rc, float tau_ref,
const float*__restrict__ bias,
const float*__restrict__ gain,
float*__restrict__ spikes)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < numNeurons)
{
int neuron_index = i % neurons_per_item;
int item_index = (int)(i / neurons_per_item);
float voltage = voltage_array[i];
float ref_time = reftime_array[i];
float current = bias[neuron_index] + gain[neuron_index] * encode_result[item_index];
float dV, spike, mult;
dV = -expm1f(-dt / tau_rc) * (current - voltage);
voltage = fmaxf(voltage + dV, 0.f);
ref_time -= dt;
mult = ref_time;
mult *= -1.f / dt;
mult += 1.f;
mult = mult > 1.f ? 1.f : mult;
mult = mult < 0.f ? 0.f : mult;
voltage *= mult;
if(voltage > 1.f){
spike = 1.f / dt;
ref_time = tau_ref + dt * (1.f - (voltage - 1.f) / dV);
voltage = 0.f;
}else{
spike = 0.f;
}
reftime_array[i] = ref_time;
voltage_array[i] = voltage;
spikes[i] = spike;
}
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: %s <neurons per item> <num_items> <num_steps>\n", argv[0]);
return 1;
}
const int neurons_per_item = atoi(argv[1]);
const int num_items = atoi(argv[2]);
const int num_steps = atoi(argv[3]);
const int num_neurons = neurons_per_item * num_items;
const size_t neurons_size = num_neurons * sizeof(float);
const size_t items_size = num_items * sizeof(float);
const size_t neurons_per_item_size = neurons_per_item * sizeof(float);
float dt = 0.1; // time step
float tau_rc = 10; // membrane time constant
  float tau_ref = 2;  // refractory time
float* encode_result = (float*) malloc (items_size);
float* bias = (float*) malloc (neurons_per_item_size);
float* gain = (float*) malloc (neurons_per_item_size);
// test
float* voltage = (float*) malloc (neurons_size);
float* reftime = (float*) malloc (neurons_size);
float* spikes = (float*) malloc (neurons_size);;
// expected
float* voltage_gold = (float*) malloc (neurons_size);
float* reftime_gold = (float*) malloc (neurons_size);
float* spikes_gold = (float*) malloc (neurons_size);;
srand(123);
for (int i = 0; i < num_items; i++) {
encode_result[i] = rand() / (float)RAND_MAX;
}
for (int i = 0; i < num_neurons; i++) {
voltage_gold[i] = voltage[i] = 1.f + rand() / (float)RAND_MAX;
reftime_gold[i] = reftime[i] = rand() % 5 / 10.f;
}
for (int i = 0; i < neurons_per_item; i++) {
bias[i] = rand() / (float)RAND_MAX;
gain[i] = rand() / (float)RAND_MAX + 0.5f;
}
float* d_encode_result;
float* d_bias;
float* d_gain;
cudaMalloc((void**)&d_encode_result, items_size);
cudaMalloc((void**)&d_bias, neurons_per_item_size);
cudaMalloc((void**)&d_gain, neurons_per_item_size);
// test
float* d_voltage;
float* d_reftime;
float* d_spikes;
cudaMalloc((void**)&d_voltage, neurons_size);
cudaMalloc((void**)&d_reftime, neurons_size);
cudaMalloc((void**)&d_spikes, neurons_size);
cudaMemcpy(d_encode_result, encode_result, items_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_bias, bias, neurons_per_item_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_gain, gain, neurons_per_item_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_voltage, voltage, neurons_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_reftime, reftime, neurons_size, cudaMemcpyHostToDevice);
dim3 blocks (256);
dim3 grids ((num_neurons + 255) / 256);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int step = 0; step < num_steps; step++) {
lif<<<grids, blocks>>>(
num_neurons,
neurons_per_item,
dt,
d_encode_result,
d_voltage,
d_reftime,
tau_rc,
tau_ref,
d_bias,
d_gain,
d_spikes);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto elapsed_time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (elapsed_time * 1e-3) / num_steps);
cudaMemcpy(spikes, d_spikes, neurons_size, cudaMemcpyDeviceToHost);
cudaMemcpy(voltage, d_voltage, neurons_size, cudaMemcpyDeviceToHost);
cudaMemcpy(reftime, d_reftime, neurons_size, cudaMemcpyDeviceToHost);
for(int step = 0; step < num_steps; step++) {
reference(num_neurons,
neurons_per_item,
dt,
encode_result,
voltage_gold,
reftime_gold,
tau_rc,
tau_ref,
bias,
gain,
spikes_gold);
}
bool ok = true;
for (int i = 0; i < num_neurons; i++) {
if (fabsf(spikes[i] - spikes_gold[i]) > 1e-3) {
printf("@%d: %f %f\n", i, spikes[i], spikes_gold[i]);
ok = false;
break;
}
}
free(encode_result);
free(voltage);
free(voltage_gold);
free(reftime);
free(reftime_gold);
free(bias);
free(gain);
free(spikes);
free(spikes_gold);
cudaFree(d_encode_result);
cudaFree(d_voltage);
cudaFree(d_reftime);
cudaFree(d_bias);
cudaFree(d_gain);
cudaFree(d_spikes);
printf("%s\n", ok ? "PASS" : "FAIL");
return 0;
}
|
005a28e23ba8aeac5cee788de323afeb25535894.hip | // !!! This is a file automatically generated by hipify!!!
/*
% Function: receiver
By: Mohammed Osama & Khaled Ahmed
*/
#include "sc_fdma_demodulator.cuh"
#include "generate_dmrs_pusch_hip.cuh"
#include "generate_ul_rs.cuh"
#include "generate_psuedo_random_seq.cuh"
#include "transform_predecoder.cuh"
#include "decompose_subframe.cuh"
#include "demapper.cuh"
#include "descrambler.cuh"
#include "deinterleaver_hip.cuh"
#include "channel_estimation.cuh"
#include "channel_equalization_zf.cuh"
int main(int argc, char **argv) {
// Random input
hipfftComplex* subframe_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*modulated_subframe_length);
hipfftComplex* subframe_h2 = (hipfftComplex *)malloc(sizeof(hipfftComplex)*modulated_subframe_length);
hipfftComplex* subframe_h3 = (hipfftComplex *)malloc(sizeof(hipfftComplex)*modulated_subframe_length);
hipfftComplex* subframe_h4 = (hipfftComplex *)malloc(sizeof(hipfftComplex)*modulated_subframe_length);
for (int i = 0; i < modulated_subframe_length; i++)
{
subframe_h[i].x = rand() / (float)RAND_MAX * 0.5;
subframe_h[i].y = rand() / (float)RAND_MAX * 0.5;
}
for (int i = 0; i < modulated_subframe_length; i++)
{
subframe_h2[i].x = rand() / (float)RAND_MAX * 0.5;
subframe_h2[i].y = rand() / (float)RAND_MAX * 0.5;
}
for (int i = 0; i < modulated_subframe_length; i++)
{
subframe_h3[i].x = rand() / (float)RAND_MAX * 0.5;
subframe_h3[i].y = rand() / (float)RAND_MAX * 0.5;
}
for (int i = 0; i < modulated_subframe_length; i++)
{
subframe_h4[i].x = rand() / (float)RAND_MAX * 0.5;
subframe_h4[i].y = rand() / (float)RAND_MAX * 0.5;
}
//For timing purpose
timerInit();
startTimer();
//Parameters
const int Qm = 6; // Modulation Order(2 = QPSK, 4 = 16QAM, 6 = 64QAM)
const int M_pusch_rb = 100; //number of resource blocks assigned to the UE
const int N_l = 4; // Number of Layers
const int N_ri = 0; //length of ri symbols
const int n_s = 0; //assume UE send on subframe 0
const int N_id_cell = 2; //assume enodeB scheduled cell 2 for the UE
const int M_pusch_sc = N_sc_rb * M_pusch_rb; //total number of subcarriers
const int n_RNTI = 10; //radio network temporary identifier given to the UE by enodeB (assume 10)
const int N_bits = Qm * 12 * M_pusch_sc; //Qm * 12 * M_pusch_sc = 6*12*1200
	//Received subframes
//Generate Pseudo Random Seq.
Byte *c_h = 0;
generate_psuedo_random_seq(&c_h, N_bits, n_RNTI, n_s, N_id_cell);
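	// c is the scrambling sequence, seeded with the UE RNTI, slot number, and cell ID; it is copied to the device for use in descrambling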
//Copy (c) to Device
Byte* c_d = 0;
hipMalloc((void **)&c_d, sizeof(Byte)*Qm * 12 * M_pusch_sc);
hipMemcpyAsync(c_d, c_h, sizeof(Byte)*N_bits, hipMemcpyHostToDevice);
hipfftComplex* subframe_d;
hipfftComplex* subframe_d2;
hipfftComplex* subframe_d3;
hipfftComplex* subframe_d4;
hipMalloc((void **)&subframe_d, sizeof(hipfftComplex)*modulated_subframe_length);
hipMalloc((void **)&subframe_d2, sizeof(hipfftComplex)*modulated_subframe_length);
hipMalloc((void **)&subframe_d3, sizeof(hipfftComplex)*modulated_subframe_length);
hipMalloc((void **)&subframe_d4, sizeof(hipfftComplex)*modulated_subframe_length);
hipMemcpyAsync(subframe_d , subframe_h , sizeof(hipfftComplex)*modulated_subframe_length, hipMemcpyHostToDevice);
hipMemcpyAsync(subframe_d2, subframe_h2, sizeof(hipfftComplex)*modulated_subframe_length, hipMemcpyHostToDevice);
hipMemcpyAsync(subframe_d3, subframe_h3, sizeof(hipfftComplex)*modulated_subframe_length, hipMemcpyHostToDevice);
hipMemcpyAsync(subframe_d4, subframe_h4, sizeof(hipfftComplex)*modulated_subframe_length, hipMemcpyHostToDevice);
stopTimer("Time of copying of data to device= %.6f ms\n", elapsed);
startTimer();
//Device data allocation
hipfftComplex* fft_vec_d;
hipfftComplex* fft_vec_d2;
hipfftComplex* fft_vec_d3;
hipfftComplex* fft_vec_d4;
hipfftComplex* demod_subframe_d;
hipfftComplex* demod_subframe_d2;
hipfftComplex* demod_subframe_d3;
hipfftComplex* demod_subframe_d4;
hipfftComplex* demod_subframe_h = (hipfftComplex*)malloc(sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
hipfftComplex* demod_subframe_h2 = (hipfftComplex*)malloc(sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
hipfftComplex* demod_subframe_h3 = (hipfftComplex*)malloc(sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
hipfftComplex* demod_subframe_h4 = (hipfftComplex*)malloc(sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
hipMalloc((void **)&fft_vec_d, sizeof(hipfftComplex)*N_symbs_per_subframe*FFT_size);
hipMalloc((void **)&fft_vec_d2, sizeof(hipfftComplex)*N_symbs_per_subframe*FFT_size);
hipMalloc((void **)&fft_vec_d3, sizeof(hipfftComplex)*N_symbs_per_subframe*FFT_size);
hipMalloc((void **)&fft_vec_d4, sizeof(hipfftComplex)*N_symbs_per_subframe*FFT_size);
hipMalloc((void **)&demod_subframe_d, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
hipMalloc((void **)&demod_subframe_d2, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
hipMalloc((void **)&demod_subframe_d3, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
hipMalloc((void **)&demod_subframe_d4, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
//DMRS section
//hipfftComplex* x_q_d;
//hipfftComplex* x_q_d2;
//hipfftComplex* x_q_d3;
//hipfftComplex* x_q_d4;
//hipfftComplex* dmrs1_generated_d = 0, *dmrs2_generated_d = 0;
//hipfftComplex* dmrs1_generated_d2 = 0, *dmrs2_generated_d2 = 0;
//hipMalloc((void **)&dmrs1_generated_d, sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb);
//hipMalloc((void **)&dmrs2_generated_d, sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb);
//hipMalloc((void **)&dmrs1_generated_d2, sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb);
//hipMalloc((void **)&dmrs2_generated_d2, sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb);
//hipMalloc((void **)&x_q_d, sizeof(hipfftComplex)*prime_nums[M_pusch_rb - 1]);
//hipMalloc((void **)&x_q_d2, sizeof(hipfftComplex)*prime_nums[M_pusch_rb - 1]);
hipfftComplex* dmrs1_decomposed_d;
hipfftComplex* dmrs2_decomposed_d;
hipfftComplex* dmrs1_decomposed_d2;
hipfftComplex* dmrs2_decomposed_d2;
hipfftComplex* dmrs1_decomposed_d3;
hipfftComplex* dmrs1_decomposed_d4;
hipfftComplex* dmrs2_decomposed_d3;
hipfftComplex* dmrs2_decomposed_d4;
hipfftComplex* complex_data_d;
hipfftComplex* complex_data_d2;
hipfftComplex* complex_data_d3;
hipfftComplex* complex_data_d4;
hipfftComplex* complex_data_h = (hipfftComplex*)malloc(sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipfftComplex* complex_data_h2 = (hipfftComplex*)malloc(sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipfftComplex* complex_data_h3 = (hipfftComplex*)malloc(sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipfftComplex* complex_data_h4 = (hipfftComplex*)malloc(sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipMalloc((void **)&complex_data_d, sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipMalloc((void **)&complex_data_d2, sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipMalloc((void **)&complex_data_d3, sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipMalloc((void **)&complex_data_d4, sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipMalloc((void **)&dmrs1_decomposed_d, sizeof(hipfftComplex)*M_pusch_sc);
hipMalloc((void **)&dmrs1_decomposed_d2, sizeof(hipfftComplex)*M_pusch_sc);
hipMalloc((void **)&dmrs2_decomposed_d, sizeof(hipfftComplex)*M_pusch_sc);
hipMalloc((void **)&dmrs2_decomposed_d2, sizeof(hipfftComplex)*M_pusch_sc);
hipMalloc((void **)&dmrs1_decomposed_d3, sizeof(hipfftComplex)*M_pusch_sc);
hipMalloc((void **)&dmrs1_decomposed_d4, sizeof(hipfftComplex)*M_pusch_sc);
hipMalloc((void **)&dmrs2_decomposed_d3, sizeof(hipfftComplex)*M_pusch_sc);
hipMalloc((void **)&dmrs2_decomposed_d4, sizeof(hipfftComplex)*M_pusch_sc);
	// Channel estimation and equalization allocation
/*hipfftComplex* channel, *equalized_subframe_d;
hipMalloc((void **)&channel, sizeof(hipfftComplex)*M_pusch_sc);
hipMalloc((void **)&equalized_subframe_d, sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipfftComplex* equalized_subframe_h = (hipfftComplex*)malloc(sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipfftComplex* channel_h = (hipfftComplex*)malloc(sizeof(hipfftComplex)*M_pusch_sc);*/
hipfftComplex* predecoded_data_d;
hipMalloc((void **)&predecoded_data_d, sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipfftComplex* predecoded_data_d2;
hipMalloc((void **)&predecoded_data_d2, sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipfftComplex* predecoded_data_d3;
hipMalloc((void **)&predecoded_data_d3, sizeof(hipfftComplex)* 12 * M_pusch_sc);
hipfftComplex* predecoded_data_d4;
hipMalloc((void **)&predecoded_data_d4, sizeof(hipfftComplex)* 12 * M_pusch_sc);
Byte *bits_d;
hipMalloc((void **)&bits_d, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits
Byte *bits_d2;
hipMalloc((void **)&bits_d2, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits
Byte *bits_d3;
hipMalloc((void **)&bits_d3, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits
Byte *bits_d4;
hipMalloc((void **)&bits_d4, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits
Byte *descrambled_bits_d;
hipMalloc((void **)&descrambled_bits_d, sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_d2;
hipMalloc((void **)&descrambled_bits_d2, sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_d3;
hipMalloc((void **)&descrambled_bits_d3, sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_d4;
hipMalloc((void **)&descrambled_bits_d4, sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_h = (Byte *)malloc(sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_h2 = (Byte *)malloc(sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_h3 = (Byte *)malloc(sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_h4 = (Byte *)malloc(sizeof(Byte)* Qm * 12 * M_pusch_sc);
// Step 1: Define C_mux
int C_mux = N_pusch_symbs;
// Step 2: Define R_mux and R_prime_mux
int H_prime_total = N_bits * N_l / (Qm*N_l);
int H_prime = H_prime_total - N_ri;
int R_mux = (H_prime_total*Qm*N_l) / C_mux;
int R_prime_mux = R_mux / (Qm*N_l);
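	// The channel deinterleaver views the bit stream as a matrix with C_mux columns and
	// R_prime_mux rows, where each matrix entry is a group of Qm*N_l bits
	// (one modulation symbol on each of the N_l layers)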
Byte *ri_d, *y_idx_d, *y_mat_d, *y_mat_h = (Byte *)malloc(sizeof(Byte)*(C_mux*R_mux));
Byte *received_bits_d;
//Byte *received_bits_h = (Byte *)malloc(sizeof(Byte *) * N_bits);
hipMalloc((void **)&ri_d, sizeof(Byte)*(N_ri * Qm * N_l));
hipMalloc((void **)&y_idx_d, sizeof(Byte)*(C_mux*R_prime_mux));
hipMalloc((void **)&y_mat_d, sizeof(Byte)*(C_mux*R_mux));
hipMalloc((void **)&received_bits_d, sizeof(Byte)* H_prime * Qm * N_l);
stopTimer("Allocation Time= %.6f ms\n", elapsed);
startTimer();
//create plans
int n[1] = { FFT_size };
hipfftHandle plan_sc_fdma, plan_sc_fdma2, plan_sc_fdma3, plan_sc_fdma4;
hipfftPlanMany(&plan_sc_fdma, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, HIPFFT_C2C, N_symbs_per_subframe);
hipfftPlanMany(&plan_sc_fdma2, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, HIPFFT_C2C, N_symbs_per_subframe);
hipfftPlanMany(&plan_sc_fdma3, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, HIPFFT_C2C, N_symbs_per_subframe);
hipfftPlanMany(&plan_sc_fdma4, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, HIPFFT_C2C, N_symbs_per_subframe);
int N_SIGS = 12; //signal_size/M_pusch_sc = 12 * M_pusch_sc / M_pusch_sc = 12
n[0] = { M_pusch_sc };
hipfftHandle plan_transform_predecoder;
hipfftHandle plan_transform_predecoder2;
hipfftHandle plan_transform_predecoder3;
hipfftHandle plan_transform_predecoder4;
hipfftPlanMany(&plan_transform_predecoder, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, HIPFFT_C2C, N_SIGS);
hipfftPlanMany(&plan_transform_predecoder2, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, HIPFFT_C2C, N_SIGS);
hipfftPlanMany(&plan_transform_predecoder3, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, HIPFFT_C2C, N_SIGS);
hipfftPlanMany(&plan_transform_predecoder4, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, HIPFFT_C2C, N_SIGS);
stopTimer("Time of plan creation= %.6f ms\n", elapsed);
startTimer();
//sc-fdma demodulation
sc_fdma_demodulator(subframe_d, M_pusch_rb, &demod_subframe_d, plan_sc_fdma, fft_vec_d);
sc_fdma_demodulator(subframe_d2, M_pusch_rb, &demod_subframe_d2, plan_sc_fdma2, fft_vec_d2);
sc_fdma_demodulator(subframe_d3, M_pusch_rb, &demod_subframe_d3, plan_sc_fdma3, fft_vec_d3);
sc_fdma_demodulator(subframe_d4, M_pusch_rb, &demod_subframe_d4, plan_sc_fdma4, fft_vec_d4);
//hipMemcpy(demod_subframe_h, demod_subframe_d, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc, hipMemcpyDeviceToHost);
//hipMemcpy(demod_subframe_h2, demod_subframe_d2, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc, hipMemcpyDeviceToHost);
//hipMemcpy(demod_subframe_h3, demod_subframe_d3, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc, hipMemcpyDeviceToHost);
//hipMemcpy(demod_subframe_h4, demod_subframe_d4, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc, hipMemcpyDeviceToHost);
//generate dmrs
//generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, 0, &dmrs1_generated_d, &dmrs2_generated_d, x_q_d);
//generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, 1, &dmrs1_generated_d2, &dmrs2_generated_d2, x_q_d2);
//Decompose subframe
decompose_subframe(demod_subframe_d, M_pusch_rb, &complex_data_d, &dmrs1_decomposed_d, &dmrs2_decomposed_d);
decompose_subframe(demod_subframe_d2, M_pusch_rb, &complex_data_d2, &dmrs1_decomposed_d2, &dmrs2_decomposed_d2);
decompose_subframe(demod_subframe_d3, M_pusch_rb, &complex_data_d3, &dmrs1_decomposed_d3, &dmrs2_decomposed_d3);
decompose_subframe(demod_subframe_d4, M_pusch_rb, &complex_data_d4, &dmrs1_decomposed_d4, &dmrs2_decomposed_d4);
//hipMemcpy(complex_data_h, complex_data_d, sizeof(hipfftComplex)* 12 * M_pusch_sc, hipMemcpyDeviceToHost);
//hipMemcpy(complex_data_h2, complex_data_d2, sizeof(hipfftComplex)* 12 * M_pusch_sc, hipMemcpyDeviceToHost);
//hipMemcpy(complex_data_h3, complex_data_d3, sizeof(hipfftComplex)* 12 * M_pusch_sc, hipMemcpyDeviceToHost);
//hipMemcpy(complex_data_h4, complex_data_d4, sizeof(hipfftComplex)* 12 * M_pusch_sc, hipMemcpyDeviceToHost);
//Channel estimation
//channe_estimation(dmrs1_decomposed_d, dmrs2_decomposed_d, dmrs1_generated_d, dmrs2_generated_d, M_pusch_sc, &channel);
//hipMemcpy(channel_h, channel, sizeof(hipfftComplex)* M_pusch_sc, hipMemcpyDeviceToHost);
//Equalization ZF
//channel_equalization_zf(demod_subframe_d, M_pusch_sc , channel, &equalized_subframe_d);
//predecoding
transform_predecoder(complex_data_d , &predecoded_data_d, plan_transform_predecoder); //signal_size = 12 * M_pusch_sc
transform_predecoder(complex_data_d2, &predecoded_data_d2, plan_transform_predecoder2); //signal_size = 12 * M_pusch_sc
transform_predecoder(complex_data_d3, &predecoded_data_d3, plan_transform_predecoder3); //signal_size = 12 * M_pusch_sc
transform_predecoder(complex_data_d4, &predecoded_data_d4, plan_transform_predecoder4); //signal_size = 12 * M_pusch_sc
//demapping
demapper(predecoded_data_d, M_pusch_rb, &bits_d, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc
demapper(predecoded_data_d2, M_pusch_rb, &bits_d2, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc
demapper(predecoded_data_d3, M_pusch_rb, &bits_d3, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc
demapper(predecoded_data_d4, M_pusch_rb, &bits_d4, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc
	//Descrambling
descrambler(bits_d, &descrambled_bits_d, c_d, N_bits);
descrambler(bits_d2, &descrambled_bits_d2, c_d, N_bits);
descrambler(bits_d3, &descrambled_bits_d3, c_d, N_bits);
descrambler(bits_d4, &descrambled_bits_d4, c_d, N_bits);
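	//Note: the same scrambling sequence c_d is reused to descramble all four streams,
	//matching the [c; c; c; c] reference in the MATLAB check printed below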
/*hipMemcpy(descrambled_bits_h, descrambled_bits_d, sizeof(Byte) * N_bits, hipMemcpyDeviceToHost);
hipMemcpy(descrambled_bits_h2, descrambled_bits_d2, sizeof(Byte) * N_bits, hipMemcpyDeviceToHost);
hipMemcpy(descrambled_bits_h3, descrambled_bits_d3, sizeof(Byte)* N_bits, hipMemcpyDeviceToHost);
hipMemcpy(descrambled_bits_h4, descrambled_bits_d4, sizeof(Byte)* N_bits, hipMemcpyDeviceToHost);*/
//deinterleaver
deinterleaver(descrambled_bits_d, descrambled_bits_d2, descrambled_bits_d3, descrambled_bits_d4, &ri_d, &received_bits_d, N_bits*N_l, N_ri, Qm, N_l, y_idx_d, y_mat_d);
//test y_mat
//hipMemcpy(y_mat_h, y_mat_d, sizeof(Byte)*(C_mux*R_mux), hipMemcpyDeviceToHost);
//Retrieve data from device
Byte* received_bits_h = (Byte*)malloc(sizeof(Byte)*N_bits * N_l);
hipMemcpy(received_bits_h, received_bits_d, sizeof(Byte)*N_bits* N_l, hipMemcpyDeviceToHost);
Byte* ri_h = (Byte*)malloc(sizeof(Byte)*N_ri * Qm * N_l);
hipMemcpy(ri_h, ri_d, sizeof(Byte)*N_ri * Qm * N_l, hipMemcpyDeviceToHost);
stopTimer("Time of processing= %.6f ms\n", elapsed);
//Print results
/*for (int i = 0; i < H_prime * Qm * N_l; i++)
{
printf("idx = %d \t %d \n", i + 1, received_bits_h[i]);
}
*/
//test file
FILE *results1;
if ((results1 = freopen("Receiver_test.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
//------------------------------------------------input subframe------------------------------------------//
//input
//first subframe
printf("clear; clc;\nsymbols_in_real = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h[i].x);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\nsymbols_in_imag = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h[i].y);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\n");
printf("subframe_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
//second subframe
printf("\nsymbols_in_real = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h2[i].x);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\nsymbols_in_imag = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h2[i].y);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\n");
printf("subframe_input2_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
//third subframe
printf("\nsymbols_in_real = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h3[i].x);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\nsymbols_in_imag = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h3[i].y);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\n");
printf("subframe_input3_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
//fourth subframe
printf("\nsymbols_in_real = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h4[i].x);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\nsymbols_in_imag = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h4[i].y);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\n");
printf("subframe_input4_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
printf("subframe_input_CUDA = [ subframe_CUDA; subframe_input2_CUDA; subframe_input3_CUDA; subframe_input4_CUDA];\n");
//printf("subframe_input_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
//// Channel estimation
//printf("x = [ ");
//for (int i = 0; i < M_pusch_sc; i++)
//{
// printf("%f ", channel_h[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < M_pusch_sc; i++)
//{
// printf("%f ", channel_h[i].y);
//}
//printf(" ];\n ");
//printf("channel_cuda = x + 1i * y;\n");
// channel equalization
/*printf("x = [ ");
for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++)
{
printf("%f ", equalized_subframe_h[i].x);
}
printf(" ]; ");
printf("\n");
printf("y = [ ");
for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++)
{
printf("%f ", equalized_subframe_h[i].y);
}
printf(" ];\n ");
printf("equalized_subframe_h = x + 1i * y;\n");*/
//------------------------------------------------demod subframe------------------------------------------//
// sc-fdma_demodulation
// first demod_subframe
//printf("x = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h[i].y);
//}
//printf(" ];\n ");
//printf("demod_subframe_h = x + 1i * y;\n");
////second demod_subframe
//printf("x = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h2[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h2[i].y);
//}
//printf(" ];\n ");
//printf("demod_subframe_h2 = x + 1i * y;\n");
////third demod_subframe
//printf("x = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h3[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h3[i].y);
//}
//printf(" ];\n ");
//printf("demod_subframe_h3 = x + 1i * y;\n");
	////fourth demod_subframe
//printf("x = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h4[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h4[i].y);
//}
//printf(" ];\n ");
//printf("demod_subframe_h4 = x + 1i * y;\n");
//------------------------------------------------decompose subframe------------------------------------------//
	// test decompose subframe
// first
//
//------------------------------------------------descrambled bits------------------------------------------//
//descrambled Bits
// first
//printf("\ndescrambled_bits_cuda = [ ");
//for (int i = 0; i < (N_bits); i++)
//{
// printf("%d", descrambled_bits_h[i]);
// if (i != ((Qm * 12 * M_pusch_sc) - 1))
// printf(",");
//}
//printf(" ];\n");
//// second
//printf("\ndescrambled_bits2_cuda = [ ");
//for (int i = 0; i < (N_bits); i++)
//{
// printf("%d", descrambled_bits_h2[i]);
// if (i != ((Qm * 12 * M_pusch_sc) - 1))
// printf(",");
//}
//printf(" ];\n");
////third
//printf("\ndescrambled_bits3_cuda = [ ");
//for (int i = 0; i < (N_bits); i++)
//{
// printf("%d", descrambled_bits_h3[i]);
// if (i != ((Qm * 12 * M_pusch_sc) - 1))
// printf(",");
//}
//printf(" ];\n");
	////fourth
//printf("\ndescrambled_bits4_cuda = [ ");
//for (int i = 0; i < (N_bits); i++)
//{
// printf("%d", descrambled_bits_h4[i]);
// if (i != ((Qm * 12 * M_pusch_sc) - 1))
// printf(",");
//}
//printf(" ];\n");
	//------------------------------------------------y_mat (debug)------------------------------------------//
//printf("\ny_mat_h_cuda = [ ");
//for (int i = 0; i < (C_mux*R_mux); i++)
//{
// printf("%d", y_mat_h[i]);
// if (i != ((Qm * 12 * M_pusch_sc *N_l) - 1))
// printf(",");
//}
//printf(" ];\n");
	//------------------------------------------------Received bits------------------------------------------//
//Received Bits
printf("\nReceved_bits_cuda = [ ");
for (int i = 0; i < (N_bits * N_l); i++)
{
printf("%d", received_bits_h[i]);
if (i != ((Qm * 12 * M_pusch_sc * N_l) - 1))
printf(",");
}
printf(" ];\n");
//RI Bits
printf("\nRI_bits_cuda = [ ");
for (int i = 0; i < (N_ri * Qm * N_l); i++)
{
printf("%d", ri_h[i]);
if (i != ((N_ri * Qm * N_l) - 1))
printf(",");
}
printf(" ];\n");
//printf("N_id_cell = 2;N_sc_rb = 12;M_pusch_rb = 100;M_pusch_sc = M_pusch_rb*N_sc_rb;Nc = 1600;n_s = 0;n_RNTI = 10;M_bits = 86400;N_l = 2;\nN_ri_bits = 0;N_ack_bits =0;Q_m = 6;\nmodulated_subframe = subframe_input_CUDA;\ndemodulated_subframe = sc_fdma_demodulator(modulated_subframe, M_pusch_rb);\ndemodulated_subframe_vect =[demodulated_subframe(0+1,:), demodulated_subframe(1+1,:), demodulated_subframe(2+1,:), demodulated_subframe(4+1,:), demodulated_subframe(5+1,:), demodulated_subframe(6+1,:), demodulated_subframe(7+1,:), demodulated_subframe(8+1,:), demodulated_subframe(9+1,:), demodulated_subframe(11+1,:), demodulated_subframe(12+1,:), demodulated_subframe(13+1,:)];\ndmrs = generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, 0);\ndmrs_1 = dmrs(1:M_pusch_sc);\ndmrs_2 = dmrs(M_pusch_sc+1:2*M_pusch_sc);\ndmrs_1_rx = demodulated_subframe(1+3,:);\ndmrs_2_rx = demodulated_subframe(1+10,:);\npredecoded_data = transform_predecoder(demodulated_subframe_vect, M_pusch_rb);\n demapped_data = demapper_hard(predecoded_data, '64qam');\n c_init = n_RNTI * 2 ^ 14 + floor(n_s / 2) * 2 ^ 9 + N_id_cell;\n c = generate_psuedo_random_seq(c_init, M_bits);\n descrambled_bits = descramble(demapped_data, c);\n [data_bits, ri_bits, ack_bits] = channel_deinterleaver(descrambled_bits, N_ri_bits, N_ack_bits, N_l, Q_m); \nisequal(data_bits, Receved_bits_cuda)\nisequal(ri_bits, RI_bits_cuda)\n");
printf("N_id_cell = 2;N_sc_rb = 12;M_pusch_rb = 100;M_pusch_sc = M_pusch_rb*N_sc_rb;Nc = 1600;n_s = 0;n_RNTI = 10;M_bits = 86400;N_l = 4;\nN_ri_bits = 0;N_ack_bits =0;Q_m = 6;\nmodulated_subframe = subframe_input_CUDA;\ndemodulated_subframe = sc_fdma_demodulator_MIMO(modulated_subframe, M_pusch_rb, N_l);\nDecoded_streams = decompose_subframe_mimo(demodulated_subframe, N_l);\ntransform_predecoded_symbols = transform_predecoder_mimo(Decoded_streams.', M_pusch_rb, N_l);\n [layer1, layer2, layer3, layer4] = layer_demapping(transform_predecoded_symbols, N_l);\ndemapped_bits1 = transpose(demapper_hard(layer1, '64qam'));\ndemapped_bits2 = transpose(demapper_hard(layer2, '64qam'));\ndemapped_bits3 = transpose(demapper_hard(layer3, '64qam'));\ndemapped_bits4 = transpose(demapper_hard(layer4, '64qam'));\n c_init = n_RNTI * 2 ^ 14 + floor(n_s / 2) * 2 ^ 9 + N_id_cell;\n c = generate_psuedo_random_seq(c_init, M_bits);\n descrambled_bits = descrambler_MIMO([demapped_bits1 demapped_bits2 demapped_bits3 demapped_bits4].', [c; c; c; c], N_l);\n [data_bits, ri_bits, ack_bits] = channel_deinterleaver_MIMO(descrambled_bits.', 0, 0, N_l, Q_m); \nisequal(data_bits, Receved_bits_cuda)\n");
//printf("sum(round(complex_data_h,6)-round(Decoded_streams(1,:),6))\n");
//printf("sum(round(complex_data_h2,6)-round(Decoded_streams(2,:),6))\n");
/*printf("isequal(descrambled_bits_cuda, descrambled_bits(1,:))\n");
printf("isequal(descrambled_bits2_cuda, descrambled_bits(2,:))\n");
printf("isequal(descrambled_bits3_cuda, descrambled_bits(3,:))\n");
printf("isequal(descrambled_bits4_cuda, descrambled_bits(4,:))");*/
// printf("sum(round(Receved_bits_cuda,6)-round(,6))\n");
fclose(results1);
return 0;
} | 005a28e23ba8aeac5cee788de323afeb25535894.cu | /*
% Function: receiver
By: Mohammed Osama & Khaled Ahmed
*/
#include "sc_fdma_demodulator.cuh"
#include "generate_dmrs_pusch.cuh"
#include "generate_ul_rs.cuh"
#include "generate_psuedo_random_seq.cuh"
#include "transform_predecoder.cuh"
#include "decompose_subframe.cuh"
#include "demapper.cuh"
#include "descrambler.cuh"
#include "deinterleaver.cuh"
#include "channel_estimation.cuh"
#include "channel_equalization_zf.cuh"
int main(int argc, char **argv) {
// Random input
cufftComplex* subframe_h = (cufftComplex *)malloc(sizeof(cufftComplex)*modulated_subframe_length);
cufftComplex* subframe_h2 = (cufftComplex *)malloc(sizeof(cufftComplex)*modulated_subframe_length);
cufftComplex* subframe_h3 = (cufftComplex *)malloc(sizeof(cufftComplex)*modulated_subframe_length);
cufftComplex* subframe_h4 = (cufftComplex *)malloc(sizeof(cufftComplex)*modulated_subframe_length);
for (int i = 0; i < modulated_subframe_length; i++)
{
subframe_h[i].x = rand() / (float)RAND_MAX * 0.5;
subframe_h[i].y = rand() / (float)RAND_MAX * 0.5;
}
for (int i = 0; i < modulated_subframe_length; i++)
{
subframe_h2[i].x = rand() / (float)RAND_MAX * 0.5;
subframe_h2[i].y = rand() / (float)RAND_MAX * 0.5;
}
for (int i = 0; i < modulated_subframe_length; i++)
{
subframe_h3[i].x = rand() / (float)RAND_MAX * 0.5;
subframe_h3[i].y = rand() / (float)RAND_MAX * 0.5;
}
for (int i = 0; i < modulated_subframe_length; i++)
{
subframe_h4[i].x = rand() / (float)RAND_MAX * 0.5;
subframe_h4[i].y = rand() / (float)RAND_MAX * 0.5;
}
	//For timing purposes
timerInit();
startTimer();
//Parameters
const int Qm = 6; // Modulation Order(2 = QPSK, 4 = 16QAM, 6 = 64QAM)
const int M_pusch_rb = 100; //number of resource blocks assigned to the UE
const int N_l = 4; // Number of Layers
const int N_ri = 0; //length of ri symbols
	const int n_s = 0; //assume UE sends on subframe 0
const int N_id_cell = 2; //assume enodeB scheduled cell 2 for the UE
const int M_pusch_sc = N_sc_rb * M_pusch_rb; //total number of subcarriers
const int n_RNTI = 10; //radio network temporary identifier given to the UE by enodeB (assume 10)
const int N_bits = Qm * 12 * M_pusch_sc; //Qm * 12 * M_pusch_sc = 6*12*1200
	//Received subframes
//Generate Pseudo Random Seq.
Byte *c_h = 0;
generate_psuedo_random_seq(&c_h, N_bits, n_RNTI, n_s, N_id_cell);
//Copy (c) to Device
Byte* c_d = 0;
cudaMalloc((void **)&c_d, sizeof(Byte)*Qm * 12 * M_pusch_sc);
cudaMemcpyAsync(c_d, c_h, sizeof(Byte)*N_bits, cudaMemcpyHostToDevice);
cufftComplex* subframe_d;
cufftComplex* subframe_d2;
cufftComplex* subframe_d3;
cufftComplex* subframe_d4;
cudaMalloc((void **)&subframe_d, sizeof(cufftComplex)*modulated_subframe_length);
cudaMalloc((void **)&subframe_d2, sizeof(cufftComplex)*modulated_subframe_length);
cudaMalloc((void **)&subframe_d3, sizeof(cufftComplex)*modulated_subframe_length);
cudaMalloc((void **)&subframe_d4, sizeof(cufftComplex)*modulated_subframe_length);
cudaMemcpyAsync(subframe_d , subframe_h , sizeof(cufftComplex)*modulated_subframe_length, cudaMemcpyHostToDevice);
cudaMemcpyAsync(subframe_d2, subframe_h2, sizeof(cufftComplex)*modulated_subframe_length, cudaMemcpyHostToDevice);
cudaMemcpyAsync(subframe_d3, subframe_h3, sizeof(cufftComplex)*modulated_subframe_length, cudaMemcpyHostToDevice);
cudaMemcpyAsync(subframe_d4, subframe_h4, sizeof(cufftComplex)*modulated_subframe_length, cudaMemcpyHostToDevice);
stopTimer("Time of copying of data to device= %.6f ms\n", elapsed);
startTimer();
//Device data allocation
cufftComplex* fft_vec_d;
cufftComplex* fft_vec_d2;
cufftComplex* fft_vec_d3;
cufftComplex* fft_vec_d4;
cufftComplex* demod_subframe_d;
cufftComplex* demod_subframe_d2;
cufftComplex* demod_subframe_d3;
cufftComplex* demod_subframe_d4;
cufftComplex* demod_subframe_h = (cufftComplex*)malloc(sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
cufftComplex* demod_subframe_h2 = (cufftComplex*)malloc(sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
cufftComplex* demod_subframe_h3 = (cufftComplex*)malloc(sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
cufftComplex* demod_subframe_h4 = (cufftComplex*)malloc(sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
cudaMalloc((void **)&fft_vec_d, sizeof(cufftComplex)*N_symbs_per_subframe*FFT_size);
cudaMalloc((void **)&fft_vec_d2, sizeof(cufftComplex)*N_symbs_per_subframe*FFT_size);
cudaMalloc((void **)&fft_vec_d3, sizeof(cufftComplex)*N_symbs_per_subframe*FFT_size);
cudaMalloc((void **)&fft_vec_d4, sizeof(cufftComplex)*N_symbs_per_subframe*FFT_size);
cudaMalloc((void **)&demod_subframe_d, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
cudaMalloc((void **)&demod_subframe_d2, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
cudaMalloc((void **)&demod_subframe_d3, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
cudaMalloc((void **)&demod_subframe_d4, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
//DMRS section
//cufftComplex* x_q_d;
//cufftComplex* x_q_d2;
//cufftComplex* x_q_d3;
//cufftComplex* x_q_d4;
//cufftComplex* dmrs1_generated_d = 0, *dmrs2_generated_d = 0;
//cufftComplex* dmrs1_generated_d2 = 0, *dmrs2_generated_d2 = 0;
//cudaMalloc((void **)&dmrs1_generated_d, sizeof(cufftComplex)*N_sc_rb*M_pusch_rb);
//cudaMalloc((void **)&dmrs2_generated_d, sizeof(cufftComplex)*N_sc_rb*M_pusch_rb);
//cudaMalloc((void **)&dmrs1_generated_d2, sizeof(cufftComplex)*N_sc_rb*M_pusch_rb);
//cudaMalloc((void **)&dmrs2_generated_d2, sizeof(cufftComplex)*N_sc_rb*M_pusch_rb);
//cudaMalloc((void **)&x_q_d, sizeof(cufftComplex)*prime_nums[M_pusch_rb - 1]);
//cudaMalloc((void **)&x_q_d2, sizeof(cufftComplex)*prime_nums[M_pusch_rb - 1]);
cufftComplex* dmrs1_decomposed_d;
cufftComplex* dmrs2_decomposed_d;
cufftComplex* dmrs1_decomposed_d2;
cufftComplex* dmrs2_decomposed_d2;
cufftComplex* dmrs1_decomposed_d3;
cufftComplex* dmrs1_decomposed_d4;
cufftComplex* dmrs2_decomposed_d3;
cufftComplex* dmrs2_decomposed_d4;
cufftComplex* complex_data_d;
cufftComplex* complex_data_d2;
cufftComplex* complex_data_d3;
cufftComplex* complex_data_d4;
cufftComplex* complex_data_h = (cufftComplex*)malloc(sizeof(cufftComplex)* 12 * M_pusch_sc);
cufftComplex* complex_data_h2 = (cufftComplex*)malloc(sizeof(cufftComplex)* 12 * M_pusch_sc);
cufftComplex* complex_data_h3 = (cufftComplex*)malloc(sizeof(cufftComplex)* 12 * M_pusch_sc);
cufftComplex* complex_data_h4 = (cufftComplex*)malloc(sizeof(cufftComplex)* 12 * M_pusch_sc);
cudaMalloc((void **)&complex_data_d, sizeof(cufftComplex)* 12 * M_pusch_sc);
cudaMalloc((void **)&complex_data_d2, sizeof(cufftComplex)* 12 * M_pusch_sc);
cudaMalloc((void **)&complex_data_d3, sizeof(cufftComplex)* 12 * M_pusch_sc);
cudaMalloc((void **)&complex_data_d4, sizeof(cufftComplex)* 12 * M_pusch_sc);
cudaMalloc((void **)&dmrs1_decomposed_d, sizeof(cufftComplex)*M_pusch_sc);
cudaMalloc((void **)&dmrs1_decomposed_d2, sizeof(cufftComplex)*M_pusch_sc);
cudaMalloc((void **)&dmrs2_decomposed_d, sizeof(cufftComplex)*M_pusch_sc);
cudaMalloc((void **)&dmrs2_decomposed_d2, sizeof(cufftComplex)*M_pusch_sc);
cudaMalloc((void **)&dmrs1_decomposed_d3, sizeof(cufftComplex)*M_pusch_sc);
cudaMalloc((void **)&dmrs1_decomposed_d4, sizeof(cufftComplex)*M_pusch_sc);
cudaMalloc((void **)&dmrs2_decomposed_d3, sizeof(cufftComplex)*M_pusch_sc);
cudaMalloc((void **)&dmrs2_decomposed_d4, sizeof(cufftComplex)*M_pusch_sc);
	// Channel estimation and equalization allocation
/*cufftComplex* channel, *equalized_subframe_d;
cudaMalloc((void **)&channel, sizeof(cufftComplex)*M_pusch_sc);
cudaMalloc((void **)&equalized_subframe_d, sizeof(cufftComplex)* 12 * M_pusch_sc);
cufftComplex* equalized_subframe_h = (cufftComplex*)malloc(sizeof(cufftComplex)* 12 * M_pusch_sc);
cufftComplex* channel_h = (cufftComplex*)malloc(sizeof(cufftComplex)*M_pusch_sc);*/
cufftComplex* predecoded_data_d;
cudaMalloc((void **)&predecoded_data_d, sizeof(cufftComplex)* 12 * M_pusch_sc);
cufftComplex* predecoded_data_d2;
cudaMalloc((void **)&predecoded_data_d2, sizeof(cufftComplex)* 12 * M_pusch_sc);
cufftComplex* predecoded_data_d3;
cudaMalloc((void **)&predecoded_data_d3, sizeof(cufftComplex)* 12 * M_pusch_sc);
cufftComplex* predecoded_data_d4;
cudaMalloc((void **)&predecoded_data_d4, sizeof(cufftComplex)* 12 * M_pusch_sc);
Byte *bits_d;
cudaMalloc((void **)&bits_d, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits
Byte *bits_d2;
cudaMalloc((void **)&bits_d2, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits
Byte *bits_d3;
cudaMalloc((void **)&bits_d3, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits
Byte *bits_d4;
cudaMalloc((void **)&bits_d4, sizeof(Byte)* Qm * 12 * M_pusch_sc); //FIX Number_demaped_bits
Byte *descrambled_bits_d;
cudaMalloc((void **)&descrambled_bits_d, sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_d2;
cudaMalloc((void **)&descrambled_bits_d2, sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_d3;
cudaMalloc((void **)&descrambled_bits_d3, sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_d4;
cudaMalloc((void **)&descrambled_bits_d4, sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_h = (Byte *)malloc(sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_h2 = (Byte *)malloc(sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_h3 = (Byte *)malloc(sizeof(Byte)* Qm * 12 * M_pusch_sc);
Byte *descrambled_bits_h4 = (Byte *)malloc(sizeof(Byte)* Qm * 12 * M_pusch_sc);
// Step 1: Define C_mux
int C_mux = N_pusch_symbs;
// Step 2: Define R_mux and R_prime_mux
int H_prime_total = N_bits * N_l / (Qm*N_l);
int H_prime = H_prime_total - N_ri;
int R_mux = (H_prime_total*Qm*N_l) / C_mux;
int R_prime_mux = R_mux / (Qm*N_l);
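	// The channel deinterleaver views the bit stream as a matrix with C_mux columns and
	// R_prime_mux rows, where each matrix entry is a group of Qm*N_l bits
	// (one modulation symbol on each of the N_l layers)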
Byte *ri_d, *y_idx_d, *y_mat_d, *y_mat_h = (Byte *)malloc(sizeof(Byte)*(C_mux*R_mux));
Byte *received_bits_d;
//Byte *received_bits_h = (Byte *)malloc(sizeof(Byte *) * N_bits);
cudaMalloc((void **)&ri_d, sizeof(Byte)*(N_ri * Qm * N_l));
cudaMalloc((void **)&y_idx_d, sizeof(Byte)*(C_mux*R_prime_mux));
cudaMalloc((void **)&y_mat_d, sizeof(Byte)*(C_mux*R_mux));
cudaMalloc((void **)&received_bits_d, sizeof(Byte)* H_prime * Qm * N_l);
stopTimer("Allocation Time= %.6f ms\n", elapsed);
startTimer();
//create plans
int n[1] = { FFT_size };
cufftHandle plan_sc_fdma, plan_sc_fdma2, plan_sc_fdma3, plan_sc_fdma4;
cufftPlanMany(&plan_sc_fdma, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, CUFFT_C2C, N_symbs_per_subframe);
cufftPlanMany(&plan_sc_fdma2, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, CUFFT_C2C, N_symbs_per_subframe);
cufftPlanMany(&plan_sc_fdma3, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, CUFFT_C2C, N_symbs_per_subframe);
cufftPlanMany(&plan_sc_fdma4, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, CUFFT_C2C, N_symbs_per_subframe);
int N_SIGS = 12; //signal_size/M_pusch_sc = 12 * M_pusch_sc / M_pusch_sc = 12
n[0] = { M_pusch_sc };
cufftHandle plan_transform_predecoder;
cufftHandle plan_transform_predecoder2;
cufftHandle plan_transform_predecoder3;
cufftHandle plan_transform_predecoder4;
cufftPlanMany(&plan_transform_predecoder, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, CUFFT_C2C, N_SIGS);
cufftPlanMany(&plan_transform_predecoder2, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, CUFFT_C2C, N_SIGS);
cufftPlanMany(&plan_transform_predecoder3, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, CUFFT_C2C, N_SIGS);
cufftPlanMany(&plan_transform_predecoder4, 1, n, NULL, 1, M_pusch_sc, NULL, 1, M_pusch_sc, CUFFT_C2C, N_SIGS);
stopTimer("Time of plan creation= %.6f ms\n", elapsed);
startTimer();
//sc-fdma demodulation
sc_fdma_demodulator(subframe_d, M_pusch_rb, &demod_subframe_d, plan_sc_fdma, fft_vec_d);
sc_fdma_demodulator(subframe_d2, M_pusch_rb, &demod_subframe_d2, plan_sc_fdma2, fft_vec_d2);
sc_fdma_demodulator(subframe_d3, M_pusch_rb, &demod_subframe_d3, plan_sc_fdma3, fft_vec_d3);
sc_fdma_demodulator(subframe_d4, M_pusch_rb, &demod_subframe_d4, plan_sc_fdma4, fft_vec_d4);
//cudaMemcpy(demod_subframe_h, demod_subframe_d, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc, cudaMemcpyDeviceToHost);
//cudaMemcpy(demod_subframe_h2, demod_subframe_d2, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc, cudaMemcpyDeviceToHost);
//cudaMemcpy(demod_subframe_h3, demod_subframe_d3, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc, cudaMemcpyDeviceToHost);
//cudaMemcpy(demod_subframe_h4, demod_subframe_d4, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc, cudaMemcpyDeviceToHost);
//generate dmrs
//generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, 0, &dmrs1_generated_d, &dmrs2_generated_d, x_q_d);
//generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, 1, &dmrs1_generated_d2, &dmrs2_generated_d2, x_q_d2);
//Decompose subframe
decompose_subframe(demod_subframe_d, M_pusch_rb, &complex_data_d, &dmrs1_decomposed_d, &dmrs2_decomposed_d);
decompose_subframe(demod_subframe_d2, M_pusch_rb, &complex_data_d2, &dmrs1_decomposed_d2, &dmrs2_decomposed_d2);
decompose_subframe(demod_subframe_d3, M_pusch_rb, &complex_data_d3, &dmrs1_decomposed_d3, &dmrs2_decomposed_d3);
decompose_subframe(demod_subframe_d4, M_pusch_rb, &complex_data_d4, &dmrs1_decomposed_d4, &dmrs2_decomposed_d4);
//cudaMemcpy(complex_data_h, complex_data_d, sizeof(cufftComplex)* 12 * M_pusch_sc, cudaMemcpyDeviceToHost);
//cudaMemcpy(complex_data_h2, complex_data_d2, sizeof(cufftComplex)* 12 * M_pusch_sc, cudaMemcpyDeviceToHost);
//cudaMemcpy(complex_data_h3, complex_data_d3, sizeof(cufftComplex)* 12 * M_pusch_sc, cudaMemcpyDeviceToHost);
//cudaMemcpy(complex_data_h4, complex_data_d4, sizeof(cufftComplex)* 12 * M_pusch_sc, cudaMemcpyDeviceToHost);
//Channel estimation
//channe_estimation(dmrs1_decomposed_d, dmrs2_decomposed_d, dmrs1_generated_d, dmrs2_generated_d, M_pusch_sc, &channel);
//cudaMemcpy(channel_h, channel, sizeof(cufftComplex)* M_pusch_sc, cudaMemcpyDeviceToHost);
//Equalization ZF
//channel_equalization_zf(demod_subframe_d, M_pusch_sc , channel, &equalized_subframe_d);
//predecoding
transform_predecoder(complex_data_d , &predecoded_data_d, plan_transform_predecoder); //signal_size = 12 * M_pusch_sc
transform_predecoder(complex_data_d2, &predecoded_data_d2, plan_transform_predecoder2); //signal_size = 12 * M_pusch_sc
transform_predecoder(complex_data_d3, &predecoded_data_d3, plan_transform_predecoder3); //signal_size = 12 * M_pusch_sc
transform_predecoder(complex_data_d4, &predecoded_data_d4, plan_transform_predecoder4); //signal_size = 12 * M_pusch_sc
//demapping
demapper(predecoded_data_d, M_pusch_rb, &bits_d, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc
demapper(predecoded_data_d2, M_pusch_rb, &bits_d2, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc
demapper(predecoded_data_d3, M_pusch_rb, &bits_d3, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc
demapper(predecoded_data_d4, M_pusch_rb, &bits_d4, Qm * 12 * M_pusch_sc, Qm); //Number_demaped_bits = Qm * 12 * M_pusch_sc
	//Descrambling
descrambler(bits_d, &descrambled_bits_d, c_d, N_bits);
descrambler(bits_d2, &descrambled_bits_d2, c_d, N_bits);
descrambler(bits_d3, &descrambled_bits_d3, c_d, N_bits);
descrambler(bits_d4, &descrambled_bits_d4, c_d, N_bits);
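	//Note: the same scrambling sequence c_d is reused to descramble all four streams,
	//matching the [c; c; c; c] reference in the MATLAB check printed below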
/*cudaMemcpy(descrambled_bits_h, descrambled_bits_d, sizeof(Byte) * N_bits, cudaMemcpyDeviceToHost);
cudaMemcpy(descrambled_bits_h2, descrambled_bits_d2, sizeof(Byte) * N_bits, cudaMemcpyDeviceToHost);
cudaMemcpy(descrambled_bits_h3, descrambled_bits_d3, sizeof(Byte)* N_bits, cudaMemcpyDeviceToHost);
cudaMemcpy(descrambled_bits_h4, descrambled_bits_d4, sizeof(Byte)* N_bits, cudaMemcpyDeviceToHost);*/
//deinterleaver
deinterleaver(descrambled_bits_d, descrambled_bits_d2, descrambled_bits_d3, descrambled_bits_d4, &ri_d, &received_bits_d, N_bits*N_l, N_ri, Qm, N_l, y_idx_d, y_mat_d);
//test y_mat
//cudaMemcpy(y_mat_h, y_mat_d, sizeof(Byte)*(C_mux*R_mux), cudaMemcpyDeviceToHost);
//Retrieve data from device
Byte* received_bits_h = (Byte*)malloc(sizeof(Byte)*N_bits * N_l);
cudaMemcpy(received_bits_h, received_bits_d, sizeof(Byte)*N_bits* N_l, cudaMemcpyDeviceToHost);
Byte* ri_h = (Byte*)malloc(sizeof(Byte)*N_ri * Qm * N_l);
cudaMemcpy(ri_h, ri_d, sizeof(Byte)*N_ri * Qm * N_l, cudaMemcpyDeviceToHost);
stopTimer("Time of processing= %.6f ms\n", elapsed);
//Print results
/*for (int i = 0; i < H_prime * Qm * N_l; i++)
{
printf("idx = %d \t %d \n", i + 1, received_bits_h[i]);
}
*/
//test file
FILE *results1;
if ((results1 = freopen("Receiver_test.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
//------------------------------------------------input subframe------------------------------------------//
//input
//first subframe
printf("clear; clc;\nsymbols_in_real = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h[i].x);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\nsymbols_in_imag = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h[i].y);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\n");
printf("subframe_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
//second subframe
printf("\nsymbols_in_real = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h2[i].x);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\nsymbols_in_imag = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h2[i].y);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\n");
printf("subframe_input2_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
//third subframe
printf("\nsymbols_in_real = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h3[i].x);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\nsymbols_in_imag = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h3[i].y);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\n");
printf("subframe_input3_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
//fourth subframe
printf("\nsymbols_in_real = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h4[i].x);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\nsymbols_in_imag = [ ");
for (int i = 0; i < (modulated_subframe_length); i++)
{
printf("%10f", subframe_h4[i].y);
if (i != ((modulated_subframe_length)-1))
printf(",");
}
printf(" ];\n");
printf("subframe_input4_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
printf("subframe_input_CUDA = [ subframe_CUDA; subframe_input2_CUDA; subframe_input3_CUDA; subframe_input4_CUDA];\n");
//printf("subframe_input_CUDA = symbols_in_real + 1i * symbols_in_imag;\n");
//// Channel estimation
//printf("x = [ ");
//for (int i = 0; i < M_pusch_sc; i++)
//{
// printf("%f ", channel_h[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < M_pusch_sc; i++)
//{
// printf("%f ", channel_h[i].y);
//}
//printf(" ];\n ");
//printf("channel_cuda = x + 1i * y;\n");
// channel equalization
/*printf("x = [ ");
for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++)
{
printf("%f ", equalized_subframe_h[i].x);
}
printf(" ]; ");
printf("\n");
printf("y = [ ");
for (int i = 0; i < (M_pusch_sc*N_data_symbs_per_subframe); i++)
{
printf("%f ", equalized_subframe_h[i].y);
}
printf(" ];\n ");
printf("equalized_subframe_h = x + 1i * y;\n");*/
//------------------------------------------------demod subframe------------------------------------------//
// sc-fdma_demodulation
// first demod_subframe
//printf("x = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h[i].y);
//}
//printf(" ];\n ");
//printf("demod_subframe_h = x + 1i * y;\n");
////second demod_subframe
//printf("x = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h2[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h2[i].y);
//}
//printf(" ];\n ");
//printf("demod_subframe_h2 = x + 1i * y;\n");
////third demod_subframe
//printf("x = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h3[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h3[i].y);
//}
//printf(" ];\n ");
//printf("demod_subframe_h3 = x + 1i * y;\n");
	////fourth demod_subframe
//printf("x = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h4[i].x);
//}
//printf(" ]; ");
//printf("\n");
//printf("y = [ ");
//for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
//{
// printf("%f ", demod_subframe_h4[i].y);
//}
//printf(" ];\n ");
//printf("demod_subframe_h4 = x + 1i * y;\n");
//------------------------------------------------decompose subframe------------------------------------------//
	// test decompose subframe
// first
//
//------------------------------------------------descrambled bits------------------------------------------//
//descrambled Bits
// first
//printf("\ndescrambled_bits_cuda = [ ");
//for (int i = 0; i < (N_bits); i++)
//{
// printf("%d", descrambled_bits_h[i]);
// if (i != ((Qm * 12 * M_pusch_sc) - 1))
// printf(",");
//}
//printf(" ];\n");
//// second
//printf("\ndescrambled_bits2_cuda = [ ");
//for (int i = 0; i < (N_bits); i++)
//{
// printf("%d", descrambled_bits_h2[i]);
// if (i != ((Qm * 12 * M_pusch_sc) - 1))
// printf(",");
//}
//printf(" ];\n");
////third
//printf("\ndescrambled_bits3_cuda = [ ");
//for (int i = 0; i < (N_bits); i++)
//{
// printf("%d", descrambled_bits_h3[i]);
// if (i != ((Qm * 12 * M_pusch_sc) - 1))
// printf(",");
//}
//printf(" ];\n");
	////fourth
//printf("\ndescrambled_bits4_cuda = [ ");
//for (int i = 0; i < (N_bits); i++)
//{
// printf("%d", descrambled_bits_h4[i]);
// if (i != ((Qm * 12 * M_pusch_sc) - 1))
// printf(",");
//}
//printf(" ];\n");
	//------------------------------------------------y_mat (debug)------------------------------------------//
//printf("\ny_mat_h_cuda = [ ");
//for (int i = 0; i < (C_mux*R_mux); i++)
//{
// printf("%d", y_mat_h[i]);
// if (i != ((Qm * 12 * M_pusch_sc *N_l) - 1))
// printf(",");
//}
//printf(" ];\n");
	//------------------------------------------------Received bits------------------------------------------//
//Received Bits
printf("\nReceved_bits_cuda = [ ");
for (int i = 0; i < (N_bits * N_l); i++)
{
printf("%d", received_bits_h[i]);
if (i != ((Qm * 12 * M_pusch_sc * N_l) - 1))
printf(",");
}
printf(" ];\n");
//RI Bits
printf("\nRI_bits_cuda = [ ");
for (int i = 0; i < (N_ri * Qm * N_l); i++)
{
printf("%d", ri_h[i]);
if (i != ((N_ri * Qm * N_l) - 1))
printf(",");
}
printf(" ];\n");
//printf("N_id_cell = 2;N_sc_rb = 12;M_pusch_rb = 100;M_pusch_sc = M_pusch_rb*N_sc_rb;Nc = 1600;n_s = 0;n_RNTI = 10;M_bits = 86400;N_l = 2;\nN_ri_bits = 0;N_ack_bits =0;Q_m = 6;\nmodulated_subframe = subframe_input_CUDA;\ndemodulated_subframe = sc_fdma_demodulator(modulated_subframe, M_pusch_rb);\ndemodulated_subframe_vect =[demodulated_subframe(0+1,:), demodulated_subframe(1+1,:), demodulated_subframe(2+1,:), demodulated_subframe(4+1,:), demodulated_subframe(5+1,:), demodulated_subframe(6+1,:), demodulated_subframe(7+1,:), demodulated_subframe(8+1,:), demodulated_subframe(9+1,:), demodulated_subframe(11+1,:), demodulated_subframe(12+1,:), demodulated_subframe(13+1,:)];\ndmrs = generate_dmrs_pusch(n_s, N_id_cell, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, 0);\ndmrs_1 = dmrs(1:M_pusch_sc);\ndmrs_2 = dmrs(M_pusch_sc+1:2*M_pusch_sc);\ndmrs_1_rx = demodulated_subframe(1+3,:);\ndmrs_2_rx = demodulated_subframe(1+10,:);\npredecoded_data = transform_predecoder(demodulated_subframe_vect, M_pusch_rb);\n demapped_data = demapper_hard(predecoded_data, '64qam');\n c_init = n_RNTI * 2 ^ 14 + floor(n_s / 2) * 2 ^ 9 + N_id_cell;\n c = generate_psuedo_random_seq(c_init, M_bits);\n descrambled_bits = descramble(demapped_data, c);\n [data_bits, ri_bits, ack_bits] = channel_deinterleaver(descrambled_bits, N_ri_bits, N_ack_bits, N_l, Q_m); \nisequal(data_bits, Receved_bits_cuda)\nisequal(ri_bits, RI_bits_cuda)\n");
printf("N_id_cell = 2;N_sc_rb = 12;M_pusch_rb = 100;M_pusch_sc = M_pusch_rb*N_sc_rb;Nc = 1600;n_s = 0;n_RNTI = 10;M_bits = 86400;N_l = 4;\nN_ri_bits = 0;N_ack_bits =0;Q_m = 6;\nmodulated_subframe = subframe_input_CUDA;\ndemodulated_subframe = sc_fdma_demodulator_MIMO(modulated_subframe, M_pusch_rb, N_l);\nDecoded_streams = decompose_subframe_mimo(demodulated_subframe, N_l);\ntransform_predecoded_symbols = transform_predecoder_mimo(Decoded_streams.', M_pusch_rb, N_l);\n [layer1, layer2, layer3, layer4] = layer_demapping(transform_predecoded_symbols, N_l);\ndemapped_bits1 = transpose(demapper_hard(layer1, '64qam'));\ndemapped_bits2 = transpose(demapper_hard(layer2, '64qam'));\ndemapped_bits3 = transpose(demapper_hard(layer3, '64qam'));\ndemapped_bits4 = transpose(demapper_hard(layer4, '64qam'));\n c_init = n_RNTI * 2 ^ 14 + floor(n_s / 2) * 2 ^ 9 + N_id_cell;\n c = generate_psuedo_random_seq(c_init, M_bits);\n descrambled_bits = descrambler_MIMO([demapped_bits1 demapped_bits2 demapped_bits3 demapped_bits4].', [c; c; c; c], N_l);\n [data_bits, ri_bits, ack_bits] = channel_deinterleaver_MIMO(descrambled_bits.', 0, 0, N_l, Q_m); \nisequal(data_bits, Receved_bits_cuda)\n");
//printf("sum(round(complex_data_h,6)-round(Decoded_streams(1,:),6))\n");
//printf("sum(round(complex_data_h2,6)-round(Decoded_streams(2,:),6))\n");
/*printf("isequal(descrambled_bits_cuda, descrambled_bits(1,:))\n");
printf("isequal(descrambled_bits2_cuda, descrambled_bits(2,:))\n");
printf("isequal(descrambled_bits3_cuda, descrambled_bits(3,:))\n");
printf("isequal(descrambled_bits4_cuda, descrambled_bits(4,:))");*/
// printf("sum(round(Receved_bits_cuda,6)-round(,6))\n");
fclose(results1);
return 0;
} |
3739b5077b744f287ab98755de289be99545e327.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ---------------------------------------------------------
// Author: Andy Zeng, Princeton University, 2016
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include "utils.hpp"
// CUDA kernel function to integrate a TSDF voxel volume given depth images
__global__
void Integrate(float * cam_K, float * cam2base, float * depth_im,
int im_height, int im_width, int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z, float voxel_size, float trunc_margin,
float * voxel_grid_TSDF, float * voxel_grid_weight) {
int pt_grid_z = blockIdx.x;
int pt_grid_y = threadIdx.x;
for (int pt_grid_x = 0; pt_grid_x < voxel_grid_dim_x; ++pt_grid_x) {
// Convert voxel center from grid coordinates to base frame camera coordinates
float pt_base_x = voxel_grid_origin_x + pt_grid_x * voxel_size;
float pt_base_y = voxel_grid_origin_y + pt_grid_y * voxel_size;
float pt_base_z = voxel_grid_origin_z + pt_grid_z * voxel_size;
// Convert from base frame camera coordinates to current frame camera coordinates
float tmp_pt[3] = {0};
tmp_pt[0] = pt_base_x - cam2base[0 * 4 + 3];
tmp_pt[1] = pt_base_y - cam2base[1 * 4 + 3];
tmp_pt[2] = pt_base_z - cam2base[2 * 4 + 3];
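    // Apply the rotation of cam2base transposed (i.e. its inverse) to the translated point,
    // taking it from base-frame coordinates into the current camera frame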
float pt_cam_x = cam2base[0 * 4 + 0] * tmp_pt[0] + cam2base[1 * 4 + 0] * tmp_pt[1] + cam2base[2 * 4 + 0] * tmp_pt[2];
float pt_cam_y = cam2base[0 * 4 + 1] * tmp_pt[0] + cam2base[1 * 4 + 1] * tmp_pt[1] + cam2base[2 * 4 + 1] * tmp_pt[2];
float pt_cam_z = cam2base[0 * 4 + 2] * tmp_pt[0] + cam2base[1 * 4 + 2] * tmp_pt[1] + cam2base[2 * 4 + 2] * tmp_pt[2];
if (pt_cam_z <= 0)
continue;
int pt_pix_x = roundf(cam_K[0 * 3 + 0] * (pt_cam_x / pt_cam_z) + cam_K[0 * 3 + 2]);
int pt_pix_y = roundf(cam_K[1 * 3 + 1] * (pt_cam_y / pt_cam_z) + cam_K[1 * 3 + 2]);
if (pt_pix_x < 0 || pt_pix_x >= im_width || pt_pix_y < 0 || pt_pix_y >= im_height)
continue;
float depth_val = depth_im[pt_pix_y * im_width + pt_pix_x];
if (depth_val <= 0 || depth_val > 6)
continue;
float diff = depth_val - pt_cam_z;
if (diff <= -trunc_margin)
continue;
// Integrate
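    // Running average: each frame contributes the truncated signed distance with unit
    // weight, so the stored TSDF converges to the mean observation for this voxel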
int volume_idx = pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x;
float dist = fmin(1.0f, diff / trunc_margin);
float weight_old = voxel_grid_weight[volume_idx];
float weight_new = weight_old + 1.0f;
voxel_grid_weight[volume_idx] = weight_new;
voxel_grid_TSDF[volume_idx] = (voxel_grid_TSDF[volume_idx] * weight_old + dist) / weight_new;
}
}
// Loads a binary file with depth data and generates a TSDF voxel volume (by default here: 500^3 voxels at 6 mm resolution, i.e. a 3 m cube)
// Volume is aligned with respect to the camera coordinates of the first frame (a.k.a. base frame)
int main(int argc, char * argv[]) {
// Location of camera intrinsic file
std::string cam_K_file = "data/camera-intrinsics.txt";
// Location of folder containing RGB-D frames and camera pose files
std::string data_path = "data/rgbd-frames";
int base_frame_idx = 150;
int first_frame_idx = 150;
float num_frames = 50;
float cam_K[3 * 3];
float base2world[4 * 4];
float cam2base[4 * 4];
float cam2world[4 * 4];
int im_width = 640;
int im_height = 480;
float depth_im[im_height * im_width];
// Voxel grid parameters (change these to change voxel grid resolution, etc.)
float voxel_grid_origin_x = -1.5f; // Location of voxel grid origin in base frame camera coordinates
float voxel_grid_origin_y = -1.5f;
float voxel_grid_origin_z = 0.5f;
float voxel_size = 0.006f;
float trunc_margin = voxel_size * 5;
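  // Signed distances are truncated at 5 voxel widths from the observed surface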
int voxel_grid_dim_x = 500;
int voxel_grid_dim_y = 500;
int voxel_grid_dim_z = 500;
// Manual parameters
if (argc > 1) {
cam_K_file = argv[1];
data_path = argv[2];
base_frame_idx = atoi(argv[3]);
first_frame_idx = atoi(argv[4]);
num_frames = atof(argv[5]);
voxel_grid_origin_x = atof(argv[6]);
voxel_grid_origin_y = atof(argv[7]);
voxel_grid_origin_z = atof(argv[8]);
voxel_size = atof(argv[9]);
trunc_margin = atof(argv[10]);
}
// Read camera intrinsics
std::vector<float> cam_K_vec = LoadMatrixFromFile(cam_K_file, 3, 3);
std::copy(cam_K_vec.begin(), cam_K_vec.end(), cam_K);
// Read base frame camera pose
std::ostringstream base_frame_prefix;
base_frame_prefix << std::setw(6) << std::setfill('0') << base_frame_idx;
std::string base2world_file = data_path + "/frame-" + base_frame_prefix.str() + ".pose.txt";
std::vector<float> base2world_vec = LoadMatrixFromFile(base2world_file, 4, 4);
std::copy(base2world_vec.begin(), base2world_vec.end(), base2world);
// Invert base frame camera pose to get world-to-base frame transform
float base2world_inv[16] = {0};
invert_matrix(base2world, base2world_inv);
// Initialize voxel grid
float * voxel_grid_TSDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
float * voxel_grid_weight = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
voxel_grid_TSDF[i] = 1.0f;
memset(voxel_grid_weight, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
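  // Voxels start at TSDF = 1 (free space / maximum truncated distance) with zero observation weight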
// Load variables to GPU memory
float * gpu_voxel_grid_TSDF;
float * gpu_voxel_grid_weight;
hipMalloc(&gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
hipMalloc(&gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
checkCUDA(__LINE__, hipGetLastError());
hipMemcpy(gpu_voxel_grid_TSDF, voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_voxel_grid_weight, voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
checkCUDA(__LINE__, hipGetLastError());
float * gpu_cam_K;
float * gpu_cam2base;
float * gpu_depth_im;
hipMalloc(&gpu_cam_K, 3 * 3 * sizeof(float));
hipMemcpy(gpu_cam_K, cam_K, 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&gpu_cam2base, 4 * 4 * sizeof(float));
hipMalloc(&gpu_depth_im, im_height * im_width * sizeof(float));
checkCUDA(__LINE__, hipGetLastError());
// Loop through each depth frame and integrate TSDF voxel grid
for (int frame_idx = first_frame_idx; frame_idx < first_frame_idx + (int)num_frames; ++frame_idx) {
std::ostringstream curr_frame_prefix;
curr_frame_prefix << std::setw(6) << std::setfill('0') << frame_idx;
    // Read current frame depth
std::string depth_im_file = data_path + "/frame-" + curr_frame_prefix.str() + ".depth.png";
ReadDepth(depth_im_file, im_height, im_width, depth_im);
    // Read current frame camera pose
std::string cam2world_file = data_path + "/frame-" + curr_frame_prefix.str() + ".pose.txt";
std::vector<float> cam2world_vec = LoadMatrixFromFile(cam2world_file, 4, 4);
std::copy(cam2world_vec.begin(), cam2world_vec.end(), cam2world);
// Compute relative camera pose (camera-to-base frame)
multiply_matrix(base2world_inv, cam2world, cam2base);
hipMemcpy(gpu_cam2base, cam2base, 4 * 4 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_depth_im, depth_im, im_height * im_width * sizeof(float), hipMemcpyHostToDevice);
checkCUDA(__LINE__, hipGetLastError());
std::cout << "Fusing: " << depth_im_file << std::endl;
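    // Launch configuration: one block per z-slice (voxel_grid_dim_z blocks) and one thread
    // per y-row (voxel_grid_dim_y threads); each thread sweeps the x dimension in the kernel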
hipLaunchKernelGGL(( Integrate) , dim3(voxel_grid_dim_z), dim3(voxel_grid_dim_y) , 0, 0, gpu_cam_K, gpu_cam2base, gpu_depth_im,
im_height, im_width, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z, voxel_size, trunc_margin,
gpu_voxel_grid_TSDF, gpu_voxel_grid_weight);
}
// Load TSDF voxel grid from GPU to CPU memory
hipMemcpy(voxel_grid_TSDF, gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(voxel_grid_weight, gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
checkCUDA(__LINE__, hipGetLastError());
// Compute surface points from TSDF voxel grid and save to point cloud .ply file
std::cout << "Saving surface point cloud (tsdf.ply)..." << std::endl;
SaveVoxelGrid2SurfacePointCloud("tsdf.ply", voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_size, voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_grid_TSDF, voxel_grid_weight, 0.2f, 1.0f);
// Save TSDF voxel grid and its parameters to disk as binary file (float array)
std::cout << "Saving TSDF voxel grid values to disk (tsdf.bin)..." << std::endl;
std::string voxel_grid_saveto_path = "tsdf.bin";
std::ofstream outFile(voxel_grid_saveto_path, std::ios::binary | std::ios::out);
float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
outFile.write((char*)&voxel_grid_dim_xf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_yf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_zf, sizeof(float));
outFile.write((char*)&voxel_grid_origin_x, sizeof(float));
outFile.write((char*)&voxel_grid_origin_y, sizeof(float));
outFile.write((char*)&voxel_grid_origin_z, sizeof(float));
outFile.write((char*)&voxel_size, sizeof(float));
outFile.write((char*)&trunc_margin, sizeof(float));
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
outFile.write((char*)&voxel_grid_TSDF[i], sizeof(float));
outFile.close();
return 0;
}
| 3739b5077b744f287ab98755de289be99545e327.cu | // ---------------------------------------------------------
// Author: Andy Zeng, Princeton University, 2016
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include "utils.hpp"
// CUDA kernel function to integrate a TSDF voxel volume given depth images
__global__
void Integrate(float * cam_K, float * cam2base, float * depth_im,
int im_height, int im_width, int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z, float voxel_size, float trunc_margin,
float * voxel_grid_TSDF, float * voxel_grid_weight) {
int pt_grid_z = blockIdx.x;
int pt_grid_y = threadIdx.x;
for (int pt_grid_x = 0; pt_grid_x < voxel_grid_dim_x; ++pt_grid_x) {
// Convert voxel center from grid coordinates to base frame camera coordinates
float pt_base_x = voxel_grid_origin_x + pt_grid_x * voxel_size;
float pt_base_y = voxel_grid_origin_y + pt_grid_y * voxel_size;
float pt_base_z = voxel_grid_origin_z + pt_grid_z * voxel_size;
// Convert from base frame camera coordinates to current frame camera coordinates
float tmp_pt[3] = {0};
tmp_pt[0] = pt_base_x - cam2base[0 * 4 + 3];
tmp_pt[1] = pt_base_y - cam2base[1 * 4 + 3];
tmp_pt[2] = pt_base_z - cam2base[2 * 4 + 3];
float pt_cam_x = cam2base[0 * 4 + 0] * tmp_pt[0] + cam2base[1 * 4 + 0] * tmp_pt[1] + cam2base[2 * 4 + 0] * tmp_pt[2];
float pt_cam_y = cam2base[0 * 4 + 1] * tmp_pt[0] + cam2base[1 * 4 + 1] * tmp_pt[1] + cam2base[2 * 4 + 1] * tmp_pt[2];
float pt_cam_z = cam2base[0 * 4 + 2] * tmp_pt[0] + cam2base[1 * 4 + 2] * tmp_pt[1] + cam2base[2 * 4 + 2] * tmp_pt[2];
if (pt_cam_z <= 0)
continue;
int pt_pix_x = roundf(cam_K[0 * 3 + 0] * (pt_cam_x / pt_cam_z) + cam_K[0 * 3 + 2]);
int pt_pix_y = roundf(cam_K[1 * 3 + 1] * (pt_cam_y / pt_cam_z) + cam_K[1 * 3 + 2]);
if (pt_pix_x < 0 || pt_pix_x >= im_width || pt_pix_y < 0 || pt_pix_y >= im_height)
continue;
float depth_val = depth_im[pt_pix_y * im_width + pt_pix_x];
if (depth_val <= 0 || depth_val > 6)
continue;
float diff = depth_val - pt_cam_z;
if (diff <= -trunc_margin)
continue;
// Integrate
int volume_idx = pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x;
float dist = fmin(1.0f, diff / trunc_margin);
float weight_old = voxel_grid_weight[volume_idx];
float weight_new = weight_old + 1.0f;
voxel_grid_weight[volume_idx] = weight_new;
voxel_grid_TSDF[volume_idx] = (voxel_grid_TSDF[volume_idx] * weight_old + dist) / weight_new;
}
}
// Loads a binary file with depth data and generates a TSDF voxel volume (5m x 5m x 5m at 1cm resolution)
// Volume is aligned with respect to the camera coordinates of the first frame (a.k.a. base frame)
int main(int argc, char * argv[]) {
// Location of camera intrinsic file
std::string cam_K_file = "data/camera-intrinsics.txt";
// Location of folder containing RGB-D frames and camera pose files
std::string data_path = "data/rgbd-frames";
int base_frame_idx = 150;
int first_frame_idx = 150;
float num_frames = 50;
float cam_K[3 * 3];
float base2world[4 * 4];
float cam2base[4 * 4];
float cam2world[4 * 4];
int im_width = 640;
int im_height = 480;
float depth_im[im_height * im_width];
// Voxel grid parameters (change these to change voxel grid resolution, etc.)
float voxel_grid_origin_x = -1.5f; // Location of voxel grid origin in base frame camera coordinates
float voxel_grid_origin_y = -1.5f;
float voxel_grid_origin_z = 0.5f;
float voxel_size = 0.006f;
float trunc_margin = voxel_size * 5;
int voxel_grid_dim_x = 500;
int voxel_grid_dim_y = 500;
int voxel_grid_dim_z = 500;
// Manual parameters
if (argc > 1) {
cam_K_file = argv[1];
data_path = argv[2];
base_frame_idx = atoi(argv[3]);
first_frame_idx = atoi(argv[4]);
num_frames = atof(argv[5]);
voxel_grid_origin_x = atof(argv[6]);
voxel_grid_origin_y = atof(argv[7]);
voxel_grid_origin_z = atof(argv[8]);
voxel_size = atof(argv[9]);
trunc_margin = atof(argv[10]);
}
// Read camera intrinsics
std::vector<float> cam_K_vec = LoadMatrixFromFile(cam_K_file, 3, 3);
std::copy(cam_K_vec.begin(), cam_K_vec.end(), cam_K);
// Read base frame camera pose
std::ostringstream base_frame_prefix;
base_frame_prefix << std::setw(6) << std::setfill('0') << base_frame_idx;
std::string base2world_file = data_path + "/frame-" + base_frame_prefix.str() + ".pose.txt";
std::vector<float> base2world_vec = LoadMatrixFromFile(base2world_file, 4, 4);
std::copy(base2world_vec.begin(), base2world_vec.end(), base2world);
// Invert base frame camera pose to get world-to-base frame transform
float base2world_inv[16] = {0};
invert_matrix(base2world, base2world_inv);
// Initialize voxel grid
float * voxel_grid_TSDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
float * voxel_grid_weight = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
voxel_grid_TSDF[i] = 1.0f;
memset(voxel_grid_weight, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
// Load variables to GPU memory
float * gpu_voxel_grid_TSDF;
float * gpu_voxel_grid_weight;
cudaMalloc(&gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
cudaMalloc(&gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
checkCUDA(__LINE__, cudaGetLastError());
cudaMemcpy(gpu_voxel_grid_TSDF, voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_voxel_grid_weight, voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
checkCUDA(__LINE__, cudaGetLastError());
float * gpu_cam_K;
float * gpu_cam2base;
float * gpu_depth_im;
cudaMalloc(&gpu_cam_K, 3 * 3 * sizeof(float));
cudaMemcpy(gpu_cam_K, cam_K, 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&gpu_cam2base, 4 * 4 * sizeof(float));
cudaMalloc(&gpu_depth_im, im_height * im_width * sizeof(float));
checkCUDA(__LINE__, cudaGetLastError());
// Loop through each depth frame and integrate TSDF voxel grid
for (int frame_idx = first_frame_idx; frame_idx < first_frame_idx + (int)num_frames; ++frame_idx) {
std::ostringstream curr_frame_prefix;
curr_frame_prefix << std::setw(6) << std::setfill('0') << frame_idx;
    // Read current frame depth
std::string depth_im_file = data_path + "/frame-" + curr_frame_prefix.str() + ".depth.png";
ReadDepth(depth_im_file, im_height, im_width, depth_im);
    // Read current frame camera pose
std::string cam2world_file = data_path + "/frame-" + curr_frame_prefix.str() + ".pose.txt";
std::vector<float> cam2world_vec = LoadMatrixFromFile(cam2world_file, 4, 4);
std::copy(cam2world_vec.begin(), cam2world_vec.end(), cam2world);
// Compute relative camera pose (camera-to-base frame)
multiply_matrix(base2world_inv, cam2world, cam2base);
cudaMemcpy(gpu_cam2base, cam2base, 4 * 4 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_depth_im, depth_im, im_height * im_width * sizeof(float), cudaMemcpyHostToDevice);
checkCUDA(__LINE__, cudaGetLastError());
std::cout << "Fusing: " << depth_im_file << std::endl;
Integrate <<< voxel_grid_dim_z, voxel_grid_dim_y >>>(gpu_cam_K, gpu_cam2base, gpu_depth_im,
im_height, im_width, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z, voxel_size, trunc_margin,
gpu_voxel_grid_TSDF, gpu_voxel_grid_weight);
}
// Load TSDF voxel grid from GPU to CPU memory
cudaMemcpy(voxel_grid_TSDF, gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(voxel_grid_weight, gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDA(__LINE__, cudaGetLastError());
// Compute surface points from TSDF voxel grid and save to point cloud .ply file
std::cout << "Saving surface point cloud (tsdf.ply)..." << std::endl;
SaveVoxelGrid2SurfacePointCloud("tsdf.ply", voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_size, voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_grid_TSDF, voxel_grid_weight, 0.2f, 1.0f);
// Save TSDF voxel grid and its parameters to disk as binary file (float array)
std::cout << "Saving TSDF voxel grid values to disk (tsdf.bin)..." << std::endl;
std::string voxel_grid_saveto_path = "tsdf.bin";
std::ofstream outFile(voxel_grid_saveto_path, std::ios::binary | std::ios::out);
float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
outFile.write((char*)&voxel_grid_dim_xf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_yf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_zf, sizeof(float));
outFile.write((char*)&voxel_grid_origin_x, sizeof(float));
outFile.write((char*)&voxel_grid_origin_y, sizeof(float));
outFile.write((char*)&voxel_grid_origin_z, sizeof(float));
outFile.write((char*)&voxel_size, sizeof(float));
outFile.write((char*)&trunc_margin, sizeof(float));
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
outFile.write((char*)&voxel_grid_TSDF[i], sizeof(float));
outFile.close();
return 0;
}
|
6b284d8e7ccf1bc42ca8c19db7a8507c25ce3568.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
typedef long long ll_t;
typedef unsigned long long ull_t;
typedef struct __builtin_align__(32) {
float s0, s1, s2, s3, s4, s5, s6, s7;
} _float8;
typedef union {
_float8 f8;
float val[8];
} float8;
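// emulated atomicMax for float: reinterpret the bits as int and retry
// atomicCAS until the value stored at address is >= val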
__device__ __forceinline__ float atomicMax(float *address, float val)
{
int ret = __float_as_int(*address);
while(val > __int_as_float(ret))
{
int old = ret;
if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old)
break;
}
return __int_as_float(ret);
}
__device__ void init_cCache(
float8 cCache[8]
) {
#pragma unroll
for (int i=0; i<8; i++){
#pragma unroll
for (int j=0; j<8; j++){
cCache[i].val[j] = 0.f;
}
}
}
__device__ void SM2Cache(
float cache[8][4],
volatile float SM[8][128+4],
int vy, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int mi=0; mi<4; mi++){
cache[ki][mi] = SM[ki][8*vy + 4*p + mi];
}
}
}
__device__ void thread_matmul(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
cCache[mi + 4*p].val[ni] = fmaf(a, b, cCache[mi + 4*p].val[ni]);
}
}
}
}
// negative squared euclidean distance
__device__ void thread_nseuclidean(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
float dif = a - b;
cCache[mi + 4*p].val[ni] = fmaf(- dif, dif, cCache[mi + 4*p].val[ni]);
}
}
}
}
// negative manhattan distance
__device__ void thread_nmanhattan(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
cCache[mi + 4*p].val[ni] -= fabsf(a - b);
}
}
}
}
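// reduce the per-thread 8x8 tile over dim 1 (M): for each of the 8 columns (N indices),
// take the max over the 8 rows and merge the value/argmax into C/D via the float atomicMax above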
__device__ void reduce_dim_1(
float8 cCache[8],
float* C,
ll_t* D,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iN = gStartx + vx*8 + i;
float val = -INFINITY;
ll_t ind = 0;
#pragma unroll
for (int j=0; j<8; j++){
int iM = gStarty + vy*8 + j;
if (iM < _M_){
val = fmaxf(val, cCache[j].val[i]);
ind = val == cCache[j].val[i] ? iM : ind;
}
}
if (iN < N){
atomicMax(&C[(bid) * _N_ + iN], val);
if (C[(bid) * _N_ + iN] <= val){
D[(bid) * _N_ + iN] = ind;
}
}
}
}
__device__ void reduce_dim_2(
float8 cCache[8],
float* C,
ll_t* D,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iM = gStarty + vy*8 + i;
float val = -INFINITY;
ll_t ind = 0;
#pragma unroll
for (int j=0; j<8; j++){
int iN = gStartx + vx*8 + j;
if (iN < _N_){
val = fmaxf(val, cCache[i].val[j]);
ind = val == cCache[i].val[j] ? iN : ind;
}
}
if (iM < M){
atomicMax(&C[(bid) * _M_ + iM], val);
if (C[(bid) * _M_ + iM] <= val){
D[(bid) * _M_ + iM] = ind;
}
}
}
}
extern "C"
__global__ void max_sim_tn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid = blockIdx.x;
int gStartx = blockIdx.y * 128;
int gStarty = blockIdx.z * 128;
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
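  // 8 x 128 shared-memory tiles of A and B; the +4 column padding helps avoid bank conflicts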
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + wx + i*32;
int iN = gStartx + wx + i*32;
if (wy < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (wy)*_M_ + (iM)];
if (iN < _N_)
bBuffer1[i] = B[(bid)*_N_*_K_ + (wy)*_N_ + (gStartx + wx + i*32)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
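  // loop over K in tiles of 8, double-buffered: on even iterations the tile in
  // aBuffer1/bBuffer1 is written to shared memory while the next tile is prefetched
  // into aBuffer2/bBuffer2; the roles swap on odd iterations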
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iK = gStartk + 8 + wy;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + wx;
if (iK < _K_){
if (iM < _M_)
aBuffer2[i] = A[(bid)*_M_*_K_ + (iK)*_M_ + (iM)];
if (iN < _N_)
bBuffer2[i] = B[(bid)*_N_*_K_ + (iK)*_N_ + (iN)];
} else {
aBuffer2[i] = 0.f;
bBuffer2[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer1[i];
bSM[wy][wx+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + wx;
if (iK < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iK)*_M_ + (iM)];
if (iN < N)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iK)*_N_ + (iN)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer2[i];
bSM[wy][wx+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
// thread_matmul(aCache, bSM, cCache, vx, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_nt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid = blockIdx.x;
int gStartx = blockIdx.y * 128;
int gStarty = blockIdx.z * 128;
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + dy + i*32;
if (dx < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (dx)];
if (iN < _N_)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (dx)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iK = gStartk + 8 + dx;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + dy;
if (iK < _K_){
if (iM < _M_)
aBuffer2[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iK)];
if (iN < _N_)
bBuffer2[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iK)];
} else {
aBuffer2[i] = 0.f;
bBuffer2[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer1[i];
bSM[dx][dy+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + dy;
if (iK < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iK)];
if (iN < N)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iK)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer2[i];
bSM[dx][dy+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_nn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid = blockIdx.x;
int gStartx = blockIdx.y * 128;
int gStarty = blockIdx.z * 128;
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + wx + i*32;
if (iM < _M_){
if (dx < _K_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (dx)];
} else {
aBuffer1[i] = 0.f;
}
}
if (iN < N){
if (wy < _K_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (wy)*_N_ + (iN)];
} else {
bBuffer1[i] = 0.f;
}
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iKA = gStartk + 8 + dx;
int iKB = gStartk + 8 + wy;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + wx;
if (iKA < _K_){
if (iM < _M_){
aBuffer2[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iKA)];
}
} else {
aBuffer2[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer2[i] = B[(bid)*_N_*_K_ + (iKB)*_N_ + (iN)];
}
} else {
bBuffer2[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer1[i];
bSM[wy][wx+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + wx;
if (iKA < _K_){
if (iM < _M_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iKA)];
}
} else {
aBuffer1[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iKB)*_N_ + (iN)];
}
} else {
bBuffer1[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer2[i];
bSM[wy][wx+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM,cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_tt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid = blockIdx.x;
int gStartx = blockIdx.y * 128;
int gStarty = blockIdx.z * 128;
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + wx + i*32;
int iN = gStartx + dy + i*32;
if (iM < _M_){
if (wy < _K_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (wy)*_M_ + (iM)];
} else {
aBuffer1[i] = 0.f;
}
}
if (iN < _N_){
if (dx < _K_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (dx)];
} else {
bBuffer1[i] = 0.f;
}
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iKA = gStartk + 8 + wy;
int iKB = gStartk + 8 + dx;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + dy;
if (iKA < _K_){
if (iM < _M_){
aBuffer2[i] = A[(bid)*_M_*_K_ + (iKA)*_M_ + (iM)];
}
} else {
aBuffer2[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer2[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iKB)];
}
} else {
bBuffer2[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer1[i];
bSM[dx][dy+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + dy;
if (iKA < _K_){
if (iM < _M_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iKA)*_M_ + (iM)];
}
} else {
aBuffer1[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iKB)];
}
} else {
bBuffer1[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer2[i];
bSM[dx][dy+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
} | 6b284d8e7ccf1bc42ca8c19db7a8507c25ce3568.cu | typedef long long ll_t;
typedef unsigned long long ull_t;
typedef struct __builtin_align__(32) {
float s0, s1, s2, s3, s4, s5, s6, s7;
} _float8;
typedef union {
_float8 f8;
float val[8];
} float8;
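// emulated atomicMax for float: reinterpret the bits as int and retry
// atomicCAS until the value stored at address is >= val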
__device__ __forceinline__ float atomicMax(float *address, float val)
{
int ret = __float_as_int(*address);
while(val > __int_as_float(ret))
{
int old = ret;
if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old)
break;
}
return __int_as_float(ret);
}
__device__ void init_cCache(
float8 cCache[8]
) {
#pragma unroll
for (int i=0; i<8; i++){
#pragma unroll
for (int j=0; j<8; j++){
cCache[i].val[j] = 0.f;
}
}
}
__device__ void SM2Cache(
float cache[8][4],
volatile float SM[8][128+4],
int vy, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int mi=0; mi<4; mi++){
cache[ki][mi] = SM[ki][8*vy + 4*p + mi];
}
}
}
__device__ void thread_matmul(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
cCache[mi + 4*p].val[ni] = fmaf(a, b, cCache[mi + 4*p].val[ni]);
}
}
}
}
// negative squared euclidean distance
__device__ void thread_nseuclidean(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
float dif = a - b;
cCache[mi + 4*p].val[ni] = fmaf(- dif, dif, cCache[mi + 4*p].val[ni]);
}
}
}
}
// negative manhattan distance
__device__ void thread_nmanhattan(
float aCache[8][4],
volatile float bSM[8][128+4],
float8 cCache[8],
int vx, int p
) {
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int ni=0; ni<8; ni++){
// float b = bSM[ki][(8*vx)/32 + 8*vx + ni];
float b = bSM[ki][ vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<4; mi++){
float a = aCache[ki][mi];
cCache[mi + 4*p].val[ni] -= fabsf(a - b);
}
}
}
}
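// reduce the per-thread 8x8 tile over dim 1 (M): for each of the 8 columns (N indices),
// take the max over the 8 rows and merge the value/argmax into C/D via the float atomicMax above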
__device__ void reduce_dim_1(
float8 cCache[8],
float* C,
ll_t* D,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iN = gStartx + vx*8 + i;
float val = -INFINITY;
ll_t ind = 0;
#pragma unroll
for (int j=0; j<8; j++){
int iM = gStarty + vy*8 + j;
if (iM < _M_){
val = fmaxf(val, cCache[j].val[i]);
ind = val == cCache[j].val[i] ? iM : ind;
}
}
if (iN < N){
atomicMax(&C[(bid) * _N_ + iN], val);
if (C[(bid) * _N_ + iN] <= val){
D[(bid) * _N_ + iN] = ind;
}
}
}
}
__device__ void reduce_dim_2(
float8 cCache[8],
float* C,
ll_t* D,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iM = gStarty + vy*8 + i;
float val = -INFINITY;
ll_t ind = 0;
#pragma unroll
for (int j=0; j<8; j++){
int iN = gStartx + vx*8 + j;
if (iN < _N_){
val = fmaxf(val, cCache[i].val[j]);
ind = val == cCache[i].val[j] ? iN : ind;
}
}
if (iM < M){
atomicMax(&C[(bid) * _M_ + iM], val);
if (C[(bid) * _M_ + iM] <= val){
D[(bid) * _M_ + iM] = ind;
}
}
}
}
extern "C"
__global__ void max_sim_tn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid = blockIdx.x;
int gStartx = blockIdx.y * 128;
int gStarty = blockIdx.z * 128;
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
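  // 8 x 128 shared-memory tiles of A and B; the +4 column padding helps avoid bank conflicts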
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + wx + i*32;
int iN = gStartx + wx + i*32;
if (wy < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (wy)*_M_ + (iM)];
if (iN < _N_)
bBuffer1[i] = B[(bid)*_N_*_K_ + (wy)*_N_ + (gStartx + wx + i*32)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
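  // loop over K in tiles of 8, double-buffered: on even iterations the tile in
  // aBuffer1/bBuffer1 is written to shared memory while the next tile is prefetched
  // into aBuffer2/bBuffer2; the roles swap on odd iterations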
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iK = gStartk + 8 + wy;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + wx;
if (iK < _K_){
if (iM < _M_)
aBuffer2[i] = A[(bid)*_M_*_K_ + (iK)*_M_ + (iM)];
if (iN < _N_)
bBuffer2[i] = B[(bid)*_N_*_K_ + (iK)*_N_ + (iN)];
} else {
aBuffer2[i] = 0.f;
bBuffer2[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer1[i];
bSM[wy][wx+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + wx;
if (iK < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iK)*_M_ + (iM)];
if (iN < N)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iK)*_N_ + (iN)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer2[i];
bSM[wy][wx+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
// thread_matmul(aCache, bSM, cCache, vx, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_nt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid = blockIdx.x;
int gStartx = blockIdx.y * 128;
int gStarty = blockIdx.z * 128;
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + dy + i*32;
if (dx < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (dx)];
if (iN < _N_)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (dx)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iK = gStartk + 8 + dx;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + dy;
if (iK < _K_){
if (iM < _M_)
aBuffer2[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iK)];
if (iN < _N_)
bBuffer2[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iK)];
} else {
aBuffer2[i] = 0.f;
bBuffer2[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer1[i];
bSM[dx][dy+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + dy;
if (iK < _K_){
if (iM < _M_)
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iK)];
if (iN < N)
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iK)];
} else {
aBuffer1[i] = 0.f;
bBuffer1[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer2[i];
bSM[dx][dy+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_nn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid = blockIdx.x;
int gStartx = blockIdx.y * 128;
int gStarty = blockIdx.z * 128;
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + wx + i*32;
if (iM < _M_){
if (dx < _K_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (dx)];
} else {
aBuffer1[i] = 0.f;
}
}
if (iN < N){
if (wy < _K_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (wy)*_N_ + (iN)];
} else {
bBuffer1[i] = 0.f;
}
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iKA = gStartk + 8 + dx;
int iKB = gStartk + 8 + wy;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + wx;
if (iKA < _K_){
if (iM < _M_){
aBuffer2[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iKA)];
}
} else {
aBuffer2[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer2[i] = B[(bid)*_N_*_K_ + (iKB)*_N_ + (iN)];
}
} else {
bBuffer2[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer1[i];
bSM[wy][wx+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + wx;
if (iKA < _K_){
if (iM < _M_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iM)*_K_ + (iKA)];
}
} else {
aBuffer1[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iKB)*_N_ + (iN)];
}
} else {
bBuffer1[i] = 0.f;
}
}
aSM[dx][dy+i*32] = aBuffer2[i];
bSM[wy][wx+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM,cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
}
extern "C"
__global__ void max_sim_tt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
ll_t* __restrict__ D,
int M, int N, int K, int DIM
){
int tid = threadIdx.x;
int bid = blockIdx.x;
int gStartx = blockIdx.y * 128;
int gStarty = blockIdx.z * 128;
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
__shared__ volatile float aSM[8][128+4];
__shared__ volatile float bSM[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
int nIt = (_K_ + 8 - 1) / 8;
float init_value = 0.f;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + wx + i*32;
int iN = gStartx + dy + i*32;
if (iM < _M_){
if (wy < _K_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (wy)*_M_ + (iM)];
} else {
aBuffer1[i] = 0.f;
}
}
if (iN < _N_){
if (dx < _K_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (dx)];
} else {
bBuffer1[i] = 0.f;
}
}
}
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 8;
int iKA = gStartk + 8 + wy;
int iKB = gStartk + 8 + dx;
int is_odd = itr & 1;
if (is_odd == 0){
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + dy;
if (iKA < _K_){
if (iM < _M_){
aBuffer2[i] = A[(bid)*_M_*_K_ + (iKA)*_M_ + (iM)];
}
} else {
aBuffer2[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer2[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iKB)];
}
} else {
bBuffer2[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer1[i];
bSM[dx][dy+i*32+i] = bBuffer1[i];
}
} else {
#pragma unroll
for (int i=0; i<4; i++){
if (itr < nIt - 1){
int iM = gStarty + i*32 + wx;
int iN = gStartx + i*32 + dy;
if (iKA < _K_){
if (iM < _M_){
aBuffer1[i] = A[(bid)*_M_*_K_ + (iKA)*_M_ + (iM)];
}
} else {
aBuffer1[i] = 0.f;
}
if (iKB < _K_){
if (iN < _N_){
bBuffer1[i] = B[(bid)*_N_*_K_ + (iN)*_K_ + (iKB)];
}
} else {
bBuffer1[i] = 0.f;
}
}
aSM[wy][wx+i*32] = aBuffer2[i];
bSM[dx][dy+i*32+i] = bBuffer2[i];
}
}
__syncthreads();
float aCache[8][4];
#pragma unroll
for (int p=0; p<2; p++){
SM2Cache(aCache, aSM, vy, p);
_DISTFN_(aCache, bSM, cCache, vx, p);
}
__syncthreads();
}
if (DIM == 1){
reduce_dim_1(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
} else if (DIM == 2){
reduce_dim_2(
cCache, C, D,
gStartx, gStarty,
vx, vy, bid, M, N
);
}
} |
a3981d44b88bc122b6b4b5ebb36091ac5111aaac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "rocblas.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <queue>
using namespace std;
struct Point {
float x;
float y;
int cluster;
int noise; //-1 noise
};
int eps = 2;//neighborhood radius
int min_nb = 4;
Point host_sample[500];//312
int block_num, thread_num;
float __device__ dev_euclidean_distance(const Point &src, const Point &dest) {
float res = (src.x - dest.x) * (src.x - dest.x) + (src.y - dest.y) * (src.y - dest.y);
return sqrt(res);
}
/* build the full eps-neighborhood adjacency matrix and count each point's neighbors to mark core points */
void __global__ dev_region_query(Point* sample, int num, int* neighbors, int eps, int min_nb) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int line,col,pointer = tid;
unsigned int count;
    while (pointer < num * num) {// globally unique id (one entry of the num x num neighbor matrix)
line = pointer / num;
col = pointer % num;
float radius;
if (line <= col) {
radius = dev_euclidean_distance(sample[line], sample[col]);
if (radius <= eps) {
neighbors[pointer] = 1;
}
            neighbors[col * num + line] = neighbors[pointer];// mirror to the symmetric entry (across the diagonal)
}
pointer += blockDim.x * gridDim.x;
}
__syncthreads();
pointer = tid;
while (pointer < num) {
count = 1;
line = pointer * num;
for (int i = 0; i < num; i++) {
            if (pointer != i && neighbors[line+i]) {// count the number of points in p's neighborhood
count++;
}
}
if (count >= min_nb) {
sample[pointer].noise++;
}
pointer += blockDim.x * gridDim.x;
}
}
void host_algorithm_dbscan(Point* host_sample, int num) {
/*sample*/
Point* cuda_sample;
hipMalloc((void**)&cuda_sample, num * sizeof(Point));
hipMemcpy(cuda_sample, host_sample, num * sizeof(Point), hipMemcpyHostToDevice);
/*neighbor list*/
int *host_neighbor = new int[num*num]();
int *dev_neighbor;
hipMalloc((void**)&dev_neighbor, num * num * sizeof(int));
dev_region_query << <block_num, thread_num >> > (cuda_sample, num, dev_neighbor, eps, min_nb);
hipMemcpy(host_sample, cuda_sample, num * sizeof(Point), hipMemcpyDeviceToHost);
hipMemcpy(host_neighbor, dev_neighbor, num * num * sizeof(int), hipMemcpyDeviceToHost);
for (int i=0; i<num; i++){
cout << "------>sample.noise->" << host_sample[i].noise << "<-----" << endl;
}
queue<int> expand;
int cur_cluster = 0;
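    // host-side pass: grow clusters by BFS from every uncolored core point (noise >= 0),
    // using the neighbor matrix computed on the GPU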
for (int i = 0; i < num; i++) {
if (host_sample[i].noise >= 0 && host_sample[i].cluster < 1) {
host_sample[i].cluster = ++cur_cluster;
int src = i * num;
for (int j = 0; j < num; j++) {
if (host_neighbor[src + j]) {
host_sample[j].cluster = cur_cluster;
expand.push(j);
}
}
while (!expand.empty()) {/*expand the cluster*/
if (host_sample[expand.front()].noise >= 0) {
src = expand.front() * num;
for (int j = 0; j < num; j++) {
if (host_neighbor[src + j] && host_sample[j].cluster < 1) {
host_sample[j].cluster = cur_cluster;
expand.push(j);
}
}
}
expand.pop();
}
}
}
hipFree(cuda_sample);hipFree(dev_neighbor);
}
int main(int argc, char* argv[]) {
ifstream fin("test.txt");
ofstream fout;
fout.open("result.txt");
int sample_num = 0;
double a, b;
while (fin >> a >> b) {
host_sample[sample_num].x = a;
host_sample[sample_num].y = b;
host_sample[sample_num].noise = -1;
host_sample[sample_num].cluster = -1;
sample_num++;
}
cout << "------>TOTAL SAMPLE NUMB0->" << sample_num << "<-----" << endl;
cout << "------>BL0CK=10 & THREAD=100<-------- "<< endl;
block_num = 10;
thread_num = 100;
cout<<"CALCULATING BY CUDA GTX965M......\n"<<endl;
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
host_algorithm_dbscan(host_sample, sample_num);
hipEventRecord(end, 0);
hipEventSynchronize(end);
float time;
hipEventElapsedTime(&time, start, end);
cout<<"time: "<< time <<"ms --device\n"<<endl;
for (int i = 0; i < sample_num; i++) {
fout <<"["<<host_sample[i].x << "," << host_sample[i].y << "] -->"<<host_sample[i].cluster<< endl;
}
fout.close();
system("pause");
return 0;
} | a3981d44b88bc122b6b4b5ebb36091ac5111aaac.cu | #include "cuda_runtime.h"
#include "device_functions.h"
#include "cublas_v2.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <queue>
using namespace std;
struct Point {
float x;
float y;
int cluster;
int noise; //-1 noise
};
int eps = 2;//neighborhood radius
int min_nb = 4;
Point host_sample[500];//312
int block_num, thread_num;
float __device__ dev_euclidean_distance(const Point &src, const Point &dest) {
float res = (src.x - dest.x) * (src.x - dest.x) + (src.y - dest.y) * (src.y - dest.y);
return sqrt(res);
}
/* build the full eps-neighborhood adjacency matrix and count each point's neighbors to mark core points */
void __global__ dev_region_query(Point* sample, int num, int* neighbors, int eps, int min_nb) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int line,col,pointer = tid;
unsigned int count;
    while (pointer < num * num) {// globally unique id (one entry of the num x num neighbor matrix)
line = pointer / num;
col = pointer % num;
float radius;
if (line <= col) {
radius = dev_euclidean_distance(sample[line], sample[col]);
if (radius <= eps) {
neighbors[pointer] = 1;
}
            neighbors[col * num + line] = neighbors[pointer];// mirror to the symmetric entry (across the diagonal)
}
pointer += blockDim.x * gridDim.x;
}
__syncthreads();
pointer = tid;
while (pointer < num) {
count = 1;
line = pointer * num;
for (int i = 0; i < num; i++) {
            if (pointer != i && neighbors[line+i]) {// count the number of points in p's neighborhood
count++;
}
}
if (count >= min_nb) {
sample[pointer].noise++;
}
pointer += blockDim.x * gridDim.x;
}
}
void host_algorithm_dbscan(Point* host_sample, int num) {
/*sample*/
Point* cuda_sample;
cudaMalloc((void**)&cuda_sample, num * sizeof(Point));
cudaMemcpy(cuda_sample, host_sample, num * sizeof(Point), cudaMemcpyHostToDevice);
/*neighbor list*/
int *host_neighbor = new int[num*num]();
int *dev_neighbor;
cudaMalloc((void**)&dev_neighbor, num * num * sizeof(int));
dev_region_query << <block_num, thread_num >> > (cuda_sample, num, dev_neighbor, eps, min_nb);
cudaMemcpy(host_sample, cuda_sample, num * sizeof(Point), cudaMemcpyDeviceToHost);
cudaMemcpy(host_neighbor, dev_neighbor, num * num * sizeof(int), cudaMemcpyDeviceToHost);
for (int i=0; i<num; i++){
cout << "------>sample.noise->" << host_sample[i].noise << "<-----" << endl;
}
queue<int> expand;
int cur_cluster = 0;
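    // host-side pass: grow clusters by BFS from every uncolored core point (noise >= 0),
    // using the neighbor matrix computed on the GPU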
for (int i = 0; i < num; i++) {
if (host_sample[i].noise >= 0 && host_sample[i].cluster < 1) {
host_sample[i].cluster = ++cur_cluster;
int src = i * num;
for (int j = 0; j < num; j++) {
if (host_neighbor[src + j]) {
host_sample[j].cluster = cur_cluster;
expand.push(j);
}
}
while (!expand.empty()) {/*expand the cluster*/
if (host_sample[expand.front()].noise >= 0) {
src = expand.front() * num;
for (int j = 0; j < num; j++) {
if (host_neighbor[src + j] && host_sample[j].cluster < 1) {
host_sample[j].cluster = cur_cluster;
expand.push(j);
}
}
}
expand.pop();
}
}
}
cudaFree(cuda_sample);cudaFree(dev_neighbor);
}
int main(int argc, char* argv[]) {
ifstream fin("test.txt");
ofstream fout;
fout.open("result.txt");
int sample_num = 0;
double a, b;
while (fin >> a >> b) {
host_sample[sample_num].x = a;
host_sample[sample_num].y = b;
host_sample[sample_num].noise = -1;
host_sample[sample_num].cluster = -1;
sample_num++;
}
cout << "------>TOTAL SAMPLE NUMB0->" << sample_num << "<-----" << endl;
cout << "------>BL0CK=10 & THREAD=100<-------- "<< endl;
block_num = 10;
thread_num = 100;
cout<<"CALCULATING BY CUDA GTX965M......\n"<<endl;
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
host_algorithm_dbscan(host_sample, sample_num);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float time;
cudaEventElapsedTime(&time, start, end);
cout<<"time: "<< time <<"ms --device\n"<<endl;
for (int i = 0; i < sample_num; i++) {
fout <<"["<<host_sample[i].x << "," << host_sample[i].y << "] -->"<<host_sample[i].cluster<< endl;
}
fout.close();
system("pause");
return 0;
} |
3b4ae64d2b202cce00ffdf020620971cdd59c5d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by root on 23/03/2020.
//
#include "../Matrix.cuh"
#include <iostream>
__global__ void matrixDivisionScalar(double *a, double b, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = a[y * cc + x]/b;
}
}
Matrix Matrix::divideScalar(double m){
static double* c;
c = (double*) calloc(this->Rows*this->Columns,sizeof(double));
    //Pointers into device (video) memory
    double *d_a, *d_c;
    //Size in bytes of each matrix in memory
    long aSize = this->Rows*this->Columns*sizeof(double);
    long cSize = this->Rows*this->Columns*sizeof(double);
    //Allocate device memory
    hipMalloc((void**)&d_a, aSize);
    hipMalloc((void**)&d_c, cSize);
    //Copy the input matrix to the allocated device memory
    hipMemcpy(d_a, this->Value, aSize, hipMemcpyHostToDevice);
    //Set up the launch dimensions
    dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
    dim3 dimGrid(this->Rows,this->Columns);
    //Launch the element-wise division by the scalar
    hipLaunchKernelGGL(( matrixDivisionScalar), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, m, d_c, this->Rows, this->Columns);
    //Copy the result back to the host
    hipMemcpy(c, d_c, cSize, hipMemcpyDeviceToHost);
    //Free the device memory
    hipFree(d_a);
    hipFree(d_c);
    //Return the result
return {this->Columns, this->Rows, c};
}
| 3b4ae64d2b202cce00ffdf020620971cdd59c5d2.cu | //
// Created by root on 23/03/2020.
//
#include "../Matrix.cuh"
#include <iostream>
__global__ void matrixDivisionScalar(double *a, double b, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = a[y * cc + x]/b;
}
}
Matrix Matrix::divideScalar(double m){
static double* c;
c = (double*) calloc(this->Rows*this->Columns,sizeof(double));
    //Pointers into device (video) memory
    double *d_a, *d_c;
    //Size in bytes of each matrix in memory
    long aSize = this->Rows*this->Columns*sizeof(double);
    long cSize = this->Rows*this->Columns*sizeof(double);
    //Allocate device memory
    cudaMalloc((void**)&d_a, aSize);
    cudaMalloc((void**)&d_c, cSize);
    //Copy the input matrix to the allocated device memory
    cudaMemcpy(d_a, this->Value, aSize, cudaMemcpyHostToDevice);
    //Set up the launch dimensions
    dim3 dimBlock(32,32); // 32x32 -> 1024 Threads
    dim3 dimGrid(this->Rows,this->Columns);
    //Launch the element-wise division by the scalar
    matrixDivisionScalar<<<dimGrid, dimBlock>>>(d_a, m, d_c, this->Rows, this->Columns);
    //Copy the result back to the host
    cudaMemcpy(c, d_c, cSize, cudaMemcpyDeviceToHost);
    //Free the device memory
    cudaFree(d_a);
    cudaFree(d_c);
    //Return the result
return {this->Columns, this->Rows, c};
}
|
78630d77c2dc92b3a62ea57b521b614a155ce924.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
__global__ void colorWork(int* row_ptr, int* col_ind, int* neighbor_colors, int* color_array, int index, int i)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for(int j=row_ptr[i]; j < row_ptr[i+1]; j++){
    // neighborhood function to check neighbors of the visited vertex
if(color_array[col_ind[j]] != -1){
bool found = false;
for(int c=0; c < index; c++){
if(neighbor_colors[nov*tid+c] == color_array[col_ind[j]]){
found = true;
c = index;
}
}
if(found == false){
neighbor_colors[nov*tid+index] = color_array[col_ind[j]];
index = index+1;
}
//printf("forbidden added first degree %d %d \n",col_ind[j],color_array[col_ind[j]]);
}
for(int k=row_ptr[col_ind[j]]; k < row_ptr[col_ind[j]+1];k++){
if(color_array[col_ind[k]] != -1 && col_ind[k] != i){
                //search neighbor_colors for this color; add it if it is not already there
bool alreadyIn = false;
for(int c=0; c < index; c++){
if(neighbor_colors[nov*tid+c] == color_array[col_ind[k]]){
alreadyIn = true;
c = index;
}
}
if(alreadyIn == false){
neighbor_colors[nov*tid+index] = color_array[col_ind[k]];
index = index+1;
//printf("forbidden added second degree %d %d \n",col_ind[k],color_array[col_ind[k]]);
}
}
}
}
}
void callD2GC(int* row_ptr, int* col_ind, int nov)
{
float totaltime;
hipEvent_t startEvent, endEvent;
hipEventCreate(&startEvent);
hipEventCreate(&endEvent);
//
int *color_array, *d_color_array, d_neighbor_colors;//array to keep colors of the vertices the color numbers start from 1
__shared__ int *d_neighbor_colors;
hipMalloc( (void**)&color_array, nov*sizeof(int));
hipMalloc( (void**)&neighbor_colors, nov*sizeof(int));
hipMalloc( (void**)&d_color_array, nov*sizeof(int));
hipMalloc( (void**)&d_neighbor_colors, nov*sizeof(int));
hipMemset( color_array, -1, nov*sizeof(int));
// neighbor_colors =(int*)malloc(thread_num*nov*sizeof(int));
hipMemcpy(d_color_array, color_array, nov*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_neighbor_colors, neighbor_colors, nov*sizeof(int), hipMemcpyHostToDevice);
int maxColor;//to print out color number
typedef int bool;//to boolean check
enum {false, true};
printf("Nov is %d\n", nov);
printf("Started...");
bool isFinished = false;//to check all the vertices colored without conflict and coloring finished
hipEventRecord(startEvent, 0);
while(!isFinished){
printf("Turn ");
isFinished = true;//if it is not finished it is changed in conflict check part
//COLORWORKQUEUE
#pragma omp parallel for num_threads(thread_num) shared(color_array)
for(int i=0; i < nov; i++){//in parallel visit all the vertices in graph
int tid = omp_get_thread_num();
if(color_array[i] == -1){//checks if vertex is colored in previous turns
int index = 0;//keeps number of colored neighbors
                //find the forbidden colors (colors already used by distance-1 and distance-2 neighbors)
colorWork<<1,1>>(row_ptr, col_ind, );
/*printf("vertex is %d\nForbidden of %d\n",i,i);
for(int k=0;k<index;k++){
printf("%d ",neighbor_colors[nov*tid+k]);
}
printf("\n");*/
int col = 0;
bool sameWithNbor = true;
while(sameWithNbor){
sameWithNbor = false;
for(int k=0; k < index; k++){
if(col == neighbor_colors[nov*tid+k]){
col = col+1;
sameWithNbor = true;
}
}
}
color_array[i] = col;
}
}
// REMOVECONFLICTS
//TODO: Check d2 vertices
#pragma omp parallel for num_threads(thread_num) shared(isFinished, color_array)
for(int i=0; i < nov; i++){
for(int j=row_ptr[i]; j < row_ptr[i+1]; j++){
if(color_array[col_ind[j]] == color_array[i] && i > col_ind[j]){//if neighbor and vertex have same color and index of vertex is greater than neighbor
color_array[i] = -1;
j = row_ptr[i+1];
isFinished = false;
}
if(isFinished == true){
for(int k= row_ptr[col_ind[j]]; k < row_ptr[col_ind[j]+1]; k++){
if(color_array[col_ind[k]] == color_array[i] && i > col_ind[k]){
color_array[i] = -1;
k = row_ptr[col_ind[j]+1];
j = row_ptr[i+1];
isFinished = false;
}
}
}
}
}/*
printf("Colors are:\n");
for(int i=0; i<nov;i++){
printf("%d ",color_array[i]);
}*/
//printf("\n");
}
printf("\n");
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
hipEventElapsedTime(&totaltime, startEvent, endEvent);
printf("Execution time is %f secs.\n", totaltime/1000);
maxColor = color_array[0];
//printf("%d ",color_array[0]);
for(int i=1; i<nov;i++){
//printf("%d ",color_array[i]);
if(maxColor < color_array[i]){
maxColor = color_array[i];
}
}
printf("\nNumber of colors is %d\n", maxColor+1);
char result_name[1024];
strcpy(result_name,"resultOf-");
char mtx_name[255];
int index = strstr(fname,".")- fname;
strncpy(mtx_name, fname, index);
mtx_name[index] = '\0';
sprintf(result_name,"%s%s%s",result_name, mtx_name, ".txt");
FILE *f = fopen(result_name, "w");
if(f == NULL){
printf("Cannot open result_file to write\n");
exit(1);
}
fprintf(f,"%d", maxColor+1);
fprintf(f,"\n");
for(int i = 0; i<nov;i++){
fprintf(f,"%d ",color_array[i]);
}
fclose(f);
hipFree(color_array);
hipFree(neighbor_colors);
}
| 78630d77c2dc92b3a62ea57b521b614a155ce924.cu | #include <stdio.h>
#include <assert.h>
__global__ void colorWork(int* row_ptr, int* col_ind, int* neighbor_colors, int* color_array, int index, int i)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for(int j=row_ptr[i]; j < row_ptr[i+1]; j++){
    // neighborhood function to check neighbors of the visited vertex
if(color_array[col_ind[j]] != -1){
bool found = false;
for(int c=0; c < index; c++){
if(neighbor_colors[nov*tid+c] == color_array[col_ind[j]]){
found = true;
c = index;
}
}
if(found == false){
neighbor_colors[nov*tid+index] = color_array[col_ind[j]];
index = index+1;
}
//printf("forbidden added first degree %d %d \n",col_ind[j],color_array[col_ind[j]]);
}
for(int k=row_ptr[col_ind[j]]; k < row_ptr[col_ind[j]+1];k++){
if(color_array[col_ind[k]] != -1 && col_ind[k] != i){
                //search neighbor_colors for this color; add it if it is not already there
bool alreadyIn = false;
for(int c=0; c < index; c++){
if(neighbor_colors[nov*tid+c] == color_array[col_ind[k]]){
alreadyIn = true;
c = index;
}
}
if(alreadyIn == false){
neighbor_colors[nov*tid+index] = color_array[col_ind[k]];
index = index+1;
//printf("forbidden added second degree %d %d \n",col_ind[k],color_array[col_ind[k]]);
}
}
}
}
}
void callD2GC(int* row_ptr, int* col_ind, int nov)
{
float totaltime;
cudaEvent_t startEvent, endEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&endEvent);
//
int *color_array, *d_color_array, d_neighbor_colors;//array to keep colors of the vertices the color numbers start from 1
__shared__ int *d_neighbor_colors;
cudaMalloc( (void**)&color_array, nov*sizeof(int));
cudaMalloc( (void**)&neighbor_colors, nov*sizeof(int));
cudaMalloc( (void**)&d_color_array, nov*sizeof(int));
cudaMalloc( (void**)&d_neighbor_colors, nov*sizeof(int));
cudaMemset( color_array, -1, nov*sizeof(int));
// neighbor_colors =(int*)malloc(thread_num*nov*sizeof(int));
cudaMemcpy(d_color_array, color_array, nov*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_neighbor_colors, neighbor_colors, nov*sizeof(int), cudaMemcpyHostToDevice);
int maxColor;//to print out color number
typedef int bool;//to boolean check
enum {false, true};
printf("Nov is %d\n", nov);
printf("Started...");
bool isFinished = false;//to check all the vertices colored without conflict and coloring finished
cudaEventRecord(startEvent, 0);
while(!isFinished){
printf("Turn ");
isFinished = true;//if it is not finished it is changed in conflict check part
//COLORWORKQUEUE
#pragma omp parallel for num_threads(thread_num) shared(color_array)
for(int i=0; i < nov; i++){//in parallel visit all the vertices in graph
int tid = omp_get_thread_num();
if(color_array[i] == -1){//checks if vertex is colored in previous turns
int index = 0;//keeps number of colored neighbors
                //find the forbidden colors (colors already used by distance-1 and distance-2 neighbors)
colorWork<<1,1>>(row_ptr, col_ind, );
/*printf("vertex is %d\nForbidden of %d\n",i,i);
for(int k=0;k<index;k++){
printf("%d ",neighbor_colors[nov*tid+k]);
}
printf("\n");*/
int col = 0;
bool sameWithNbor = true;
while(sameWithNbor){
sameWithNbor = false;
for(int k=0; k < index; k++){
if(col == neighbor_colors[nov*tid+k]){
col = col+1;
sameWithNbor = true;
}
}
}
color_array[i] = col;
}
}
// REMOVECONFLICTS
//TODO: Check d2 vertices
#pragma omp parallel for num_threads(thread_num) shared(isFinished, color_array)
for(int i=0; i < nov; i++){
for(int j=row_ptr[i]; j < row_ptr[i+1]; j++){
if(color_array[col_ind[j]] == color_array[i] && i > col_ind[j]){//if neighbor and vertex have same color and index of vertex is greater than neighbor
color_array[i] = -1;
j = row_ptr[i+1];
isFinished = false;
}
if(isFinished == true){
for(int k= row_ptr[col_ind[j]]; k < row_ptr[col_ind[j]+1]; k++){
if(color_array[col_ind[k]] == color_array[i] && i > col_ind[k]){
color_array[i] = -1;
k = row_ptr[col_ind[j]+1];
j = row_ptr[i+1];
isFinished = false;
}
}
}
}
}/*
printf("Colors are:\n");
for(int i=0; i<nov;i++){
printf("%d ",color_array[i]);
}*/
//printf("\n");
}
printf("\n");
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime(&totaltime, startEvent, endEvent);
printf("Execution time is %f secs.\n", totaltime/1000);
maxColor = color_array[0];
//printf("%d ",color_array[0]);
for(int i=1; i<nov;i++){
//printf("%d ",color_array[i]);
if(maxColor < color_array[i]){
maxColor = color_array[i];
}
}
printf("\nNumber of colors is %d\n", maxColor+1);
char result_name[1024];
strcpy(result_name,"resultOf-");
char mtx_name[255];
int index = strstr(fname,".")- fname;
strncpy(mtx_name, fname, index);
mtx_name[index] = '\0';
sprintf(result_name,"%s%s%s",result_name, mtx_name, ".txt");
FILE *f = fopen(result_name, "w");
if(f == NULL){
printf("Cannot open result_file to write\n");
exit(1);
}
fprintf(f,"%d", maxColor+1);
fprintf(f,"\n");
for(int i = 0; i<nov;i++){
fprintf(f,"%d ",color_array[i]);
}
fclose(f);
cudaFree(color_array);
cudaFree(neighbor_colors);
}
|
f9306160e5843117cd11b416df47c0f6d746003b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* A CUDA program that demonstrates how to compute a stereo disparity map using
* SIMD SAD (Sum of Absolute Difference) intrinsics
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <hip/hip_runtime.h>
#include "stereoDisparity_kernel.cuh"
// includes, project
#include <helper_functions.h> // helper for shared that are common to CUDA Samples
#include <helper_cuda.h> // helper for checking cuda initialization and error checking
#include <helper_string.h> // helper functions for string parsing
static char *sSDKsample = "[stereoDisparity]\0";
int iDivUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
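// e.g. iDivUp(640, blockSize_x) and iDivUp(533, blockSize_y) below give grid
// dimensions that fully cover the 640x533 input images, rounding up whenever the
// image size is not a multiple of the block size.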
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! CUDA Sample for calculating depth maps
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char **argv)
{
hipDeviceProp_t deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev = 0;
// This will pick the best possible CUDA capable device
dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x20)
{
printf("%s: requires a minimum CUDA compute 2.0 capability\n", sSDKsample);
exit(EXIT_SUCCESS);
}
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Search parameters
int minDisp = -16;
int maxDisp = 0;
// Load image data
//allocate mem for the images on host side
//initialize pointers to NULL to request lib call to allocate as needed
// PPM images are loaded into 4 byte/pixel memory (RGBX)
unsigned char *h_img0 = NULL;
unsigned char *h_img1 = NULL;
unsigned int w, h;
char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]);
char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]);
printf("Loaded <%s> as image 0\n", fname0);
if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname0);
}
printf("Loaded <%s> as image 1\n", fname1);
if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname1);
}
dim3 numThreads = dim3(blockSize_x, blockSize_y, 1);
dim3 numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y));
unsigned int numData = w*h;
unsigned int memSize = sizeof(int) * numData;
//allocate mem for the result on host side
unsigned int *h_odata = (unsigned int *)malloc(memSize);
//initialize the memory
for (unsigned int i = 0; i < numData; i++)
h_odata[i] = 0;
// allocate device memory for result
unsigned int *d_odata, *d_img0, *d_img1;
checkCudaErrors(hipMalloc((void **) &d_odata, memSize));
checkCudaErrors(hipMalloc((void **) &d_img0, memSize));
checkCudaErrors(hipMalloc((void **) &d_img1, memSize));
// copy host memory to device to initialize to zeros
checkCudaErrors(hipMemcpy(d_img0, h_img0, memSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_img1, h_img1, memSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_odata, h_odata, memSize, hipMemcpyHostToDevice));
size_t offset = 0;
hipChannelFormatDesc ca_desc0 = hipCreateChannelDesc<unsigned int>();
hipChannelFormatDesc ca_desc1 = hipCreateChannelDesc<unsigned int>();
tex2Dleft.addressMode[0] = hipAddressModeClamp;
tex2Dleft.addressMode[1] = hipAddressModeClamp;
tex2Dleft.filterMode = hipFilterModePoint;
tex2Dleft.normalized = false;
tex2Dright.addressMode[0] = hipAddressModeClamp;
tex2Dright.addressMode[1] = hipAddressModeClamp;
tex2Dright.filterMode = hipFilterModePoint;
tex2Dright.normalized = false;
checkCudaErrors(hipBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4));
assert(offset == 0);
checkCudaErrors(hipBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4));
assert(offset == 0);
// First run the warmup kernel (which we'll use to get the GPU in the correct max power state)
hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
printf("Launching CUDA stereoDisparityKernel()\n");
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// launch the stereoDisparity kernel
hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
// Check to make sure the kernel didn't fail
getLastCudaError("Kernel execution failed");
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
//Copy result from device to host for verification
checkCudaErrors(hipMemcpy(h_odata, d_odata, memSize, hipMemcpyDeviceToHost));
printf("Input Size [%dx%d], ", w, h);
printf("Kernel size [%dx%d], ", (2*RAD+1), (2*RAD+1));
printf("Disparities [%d:%d]\n", minDisp, maxDisp);
printf("GPU processing time : %.4f (ms)\n", msecTotal);
printf("Pixel throughput : %.3f Mpixels/sec\n", ((float)(w *h*1000.f)/msecTotal)/1000000);
// calculate sum of resultant GPU image
unsigned int checkSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
checkSum += h_odata[i];
}
printf("GPU Checksum = %u, ", checkSum);
// write out the resulting disparity image.
unsigned char *dispOut = (unsigned char *)malloc(numData);
int mult = 20;
char *fnameOut = "output_GPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("GPU image: <%s>\n", fnameOut);
sdkSavePGM(fnameOut, dispOut, w, h);
//compute reference solution
printf("Computing CPU reference...\n");
cpu_gold_stereo((unsigned int *)h_img0, (unsigned int *)h_img1, (unsigned int *)h_odata, w, h, minDisp, maxDisp);
unsigned int cpuCheckSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
cpuCheckSum += h_odata[i];
}
printf("CPU Checksum = %u, ", cpuCheckSum);
char *cpuFnameOut = "output_CPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("CPU image: <%s>\n", cpuFnameOut);
sdkSavePGM(cpuFnameOut, dispOut, w, h);
// cleanup memory
checkCudaErrors(hipFree(d_odata));
checkCudaErrors(hipFree(d_img0));
checkCudaErrors(hipFree(d_img1));
if (h_odata != NULL) free(h_odata);
if (h_img0 != NULL) free(h_img0);
if (h_img1 != NULL) free(h_img1);
if (dispOut != NULL) free(dispOut);
sdkDeleteTimer(&timer);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit((checkSum == cpuCheckSum) ? EXIT_SUCCESS : EXIT_FAILURE);
}
| f9306160e5843117cd11b416df47c0f6d746003b.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* A CUDA program that demonstrates how to compute a stereo disparity map using
* SIMD SAD (Sum of Absolute Difference) intrinsics
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <cuda_runtime.h>
#include "stereoDisparity_kernel.cuh"
// includes, project
#include <helper_functions.h> // helper for shared that are common to CUDA Samples
#include <helper_cuda.h> // helper for checking cuda initialization and error checking
#include <helper_string.h> // helper functions for string parsing
static char *sSDKsample = "[stereoDisparity]\0";
int iDivUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! CUDA Sample for calculating depth maps
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char **argv)
{
cudaDeviceProp deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev = 0;
// This will pick the best possible CUDA capable device
dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x20)
{
printf("%s: requires a minimum CUDA compute 2.0 capability\n", sSDKsample);
exit(EXIT_SUCCESS);
}
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Search parameters
int minDisp = -16;
int maxDisp = 0;
// Load image data
//allocate mem for the images on host side
//initialize pointers to NULL to request lib call to allocate as needed
// PPM images are loaded into 4 byte/pixel memory (RGBX)
unsigned char *h_img0 = NULL;
unsigned char *h_img1 = NULL;
unsigned int w, h;
char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]);
char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]);
printf("Loaded <%s> as image 0\n", fname0);
if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname0);
}
printf("Loaded <%s> as image 1\n", fname1);
if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname1);
}
dim3 numThreads = dim3(blockSize_x, blockSize_y, 1);
dim3 numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y));
unsigned int numData = w*h;
unsigned int memSize = sizeof(int) * numData;
//allocate mem for the result on host side
unsigned int *h_odata = (unsigned int *)malloc(memSize);
//initialize the memory
for (unsigned int i = 0; i < numData; i++)
h_odata[i] = 0;
// allocate device memory for result
unsigned int *d_odata, *d_img0, *d_img1;
checkCudaErrors(cudaMalloc((void **) &d_odata, memSize));
checkCudaErrors(cudaMalloc((void **) &d_img0, memSize));
checkCudaErrors(cudaMalloc((void **) &d_img1, memSize));
// copy host memory to device to initialize to zeros
checkCudaErrors(cudaMemcpy(d_img0, h_img0, memSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_img1, h_img1, memSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_odata, h_odata, memSize, cudaMemcpyHostToDevice));
size_t offset = 0;
cudaChannelFormatDesc ca_desc0 = cudaCreateChannelDesc<unsigned int>();
cudaChannelFormatDesc ca_desc1 = cudaCreateChannelDesc<unsigned int>();
tex2Dleft.addressMode[0] = cudaAddressModeClamp;
tex2Dleft.addressMode[1] = cudaAddressModeClamp;
tex2Dleft.filterMode = cudaFilterModePoint;
tex2Dleft.normalized = false;
tex2Dright.addressMode[0] = cudaAddressModeClamp;
tex2Dright.addressMode[1] = cudaAddressModeClamp;
tex2Dright.filterMode = cudaFilterModePoint;
tex2Dright.normalized = false;
checkCudaErrors(cudaBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4));
assert(offset == 0);
checkCudaErrors(cudaBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4));
assert(offset == 0);
// First run the warmup kernel (which we'll use to get the GPU in the correct max power state)
stereoDisparityKernel<<<numBlocks, numThreads>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
printf("Launching CUDA stereoDisparityKernel()\n");
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// launch the stereoDisparity kernel
stereoDisparityKernel<<<numBlocks, numThreads>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
// Check to make sure the kernel didn't fail
getLastCudaError("Kernel execution failed");
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
//Copy result from device to host for verification
checkCudaErrors(cudaMemcpy(h_odata, d_odata, memSize, cudaMemcpyDeviceToHost));
printf("Input Size [%dx%d], ", w, h);
printf("Kernel size [%dx%d], ", (2*RAD+1), (2*RAD+1));
printf("Disparities [%d:%d]\n", minDisp, maxDisp);
printf("GPU processing time : %.4f (ms)\n", msecTotal);
printf("Pixel throughput : %.3f Mpixels/sec\n", ((float)(w *h*1000.f)/msecTotal)/1000000);
// calculate sum of resultant GPU image
unsigned int checkSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
checkSum += h_odata[i];
}
printf("GPU Checksum = %u, ", checkSum);
// write out the resulting disparity image.
unsigned char *dispOut = (unsigned char *)malloc(numData);
int mult = 20;
char *fnameOut = "output_GPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("GPU image: <%s>\n", fnameOut);
sdkSavePGM(fnameOut, dispOut, w, h);
//compute reference solution
printf("Computing CPU reference...\n");
cpu_gold_stereo((unsigned int *)h_img0, (unsigned int *)h_img1, (unsigned int *)h_odata, w, h, minDisp, maxDisp);
unsigned int cpuCheckSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
cpuCheckSum += h_odata[i];
}
printf("CPU Checksum = %u, ", cpuCheckSum);
char *cpuFnameOut = "output_CPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("CPU image: <%s>\n", cpuFnameOut);
sdkSavePGM(cpuFnameOut, dispOut, w, h);
// cleanup memory
checkCudaErrors(cudaFree(d_odata));
checkCudaErrors(cudaFree(d_img0));
checkCudaErrors(cudaFree(d_img1));
if (h_odata != NULL) free(h_odata);
if (h_img0 != NULL) free(h_img0);
if (h_img1 != NULL) free(h_img1);
if (dispOut != NULL) free(dispOut);
sdkDeleteTimer(&timer);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit((checkSum == cpuCheckSum) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
efec8d5efa9db25a6f7c11d3400af085307afbdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/transform.cuh"
#include <malloc.h>
#include <assert.h>
#include <stdio.h>
#include "../include/operation.h"
#include "../include/cuRPCF.h"
#include <omp.h>
#include "../include/transpose.cuh"
#include <iostream>
hipfftHandle planXYr2c, planXYc2r, planZ_pad, planZ_no_pad;
hipfftHandle planXYr2c_X3, planXYc2r_X6, planZ_X6, planZ_X3;
#define KERNEL_SYNCHRONIZED
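// When KERNEL_SYNCHRONIZED is defined, the kernel launches below are followed by
// hipDeviceSynchronize() so launch and execution errors surface immediately at the
// call site, at the cost of extra host/device synchronization.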
hipEvent_t start_trans, end_trans;
__host__ int initFFT(problem &pb) {
hipfftResult res;
const int mx = pb.mx;
const int my = pb.my;
const int mz = pb.mz;
const int inPitch = pb.pitch;
const int outPitch = pb.tPitch;
const int pmx = inPitch / sizeof(REAL);
const int pmz = outPitch / sizeof(cuRPCF::complex);
const int nx = mx / 3 * 2;
const int ny = my / 3 * 2;
const int istride = 1;
int inembed[2] = { my, pmx };
int idist = pmx*my;
int inembed2[2] = { my,pmx / 2 };
int idist2 = pmx / 2 * my;
int dim2[2] = { my,mx };
int dim1[1] = { mz };
int onembed[1] = { pmz };
const int odist = pmz;
const int ostride = 1;
int dim1_no_pad[1] = { mz / 2 };
//hipfftPlanMany( plan *, int dim, int* n, int* inembed, int istride, int idist
// int* onembed, int ostride, int odist, hipfftType, int batch);
res = hipfftPlanMany(&planXYr2c, 2, dim2, inembed, istride, idist,
inembed2, istride, idist2, myCUFFT_R2C, pb.pz);
if (!(res == HIPFFT_SUCCESS)) {
std::cerr << "[ERROR]:plan create failed!" << std::endl;
};
res = hipfftPlanMany(&planXYc2r, 2, dim2, inembed2, istride, idist2,
inembed, istride, idist, myCUFFT_C2R, pb.pz);
if (!(res == HIPFFT_SUCCESS)) {
std::cerr << "[ERROR]:plan create failed!" << std::endl;
};
res = hipfftPlanMany(&planZ_pad, 1, dim1, onembed, ostride, odist,
onembed, ostride, odist, myCUFFT_C2C, (nx/2+1)*ny);
if (!(res == HIPFFT_SUCCESS)) {
std::cerr << "[ERROR]:plan create failed!" << std::endl;
};
res = hipfftPlanMany(&planZ_no_pad, 1, dim1_no_pad, onembed, ostride, odist,
onembed, ostride, odist, myCUFFT_C2C, (nx/2+1)*ny);
if (!(res == HIPFFT_SUCCESS)) {
std::cerr << "[ERROR]:plan create failed !" << std::endl;
};
//res = hipfftPlanMany(&planXYr2c_X3, 2, dim2, inembed, istride, idist,
// inembed2, istride, idist2, myCUFFT_R2C, pb.pz*3);
//assert(res == HIPFFT_SUCCESS);
//res = hipfftPlanMany(&planXYc2r_X6, 2, dim2, inembed2, istride, idist2,
// inembed, istride, idist, myCUFFT_C2R, pb.pz*6);
//res = hipfftPlanMany(&planZ_X3, 1, dim1, onembed, ostride, odist,
// onembed, ostride, odist, myCUFFT_C2C, (nx / 2 + 1)*ny*3);
//res = hipfftPlanMany(&planZ_X6, 1, dim1, onembed, ostride, odist,
// onembed, ostride, odist, myCUFFT_C2C, (nx / 2 + 1)*ny * 6);
assert(res == HIPFFT_SUCCESS);
assert(res == HIPFFT_SUCCESS);
hipEventCreate(&start_trans);
hipEventCreate(&end_trans);
return 0;
}
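// Plan layout: planXYr2c / planXYc2r are batched 2D real<->complex FFTs over the
// x-y planes, one batch entry per z slab (pb.pz of them); planZ_pad / planZ_no_pad
// are batched 1D complex FFTs along z of length mz (padded) or mz/2 (no padding),
// one batch entry per retained (kx,ky) pencil, i.e. (nx/2+1)*ny of them.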
__host__ int transform_3d_one(DIRECTION dir, hipPitchedPtr& Ptr,
hipPitchedPtr& tPtr, int* dim, int* tDim,
Padding_mode pd, bool isOutput) {
//transform in x-y direction
hipfftResult res;
hipExtent extent = make_hipExtent(
2*(dim[0]/2+1) * sizeof(REAL), dim[1], dim[2]);
hipError_t err;
ASSERT(dim[0] == tDim[1]);
ASSERT(dim[1] == tDim[2]);
ASSERT(dim[2] == tDim[0]);
int nx = dim[0] / 3 * 2;
int ny = dim[1] / 3 * 2;
hipExtent tExtent = make_hipExtent(
tDim[0] * sizeof(cuRPCF::complex), nx/2+1 , ny);
hipExtent pExtent = make_hipExtent(
2 * (dim[0] / 2 + 1) * sizeof(REAL), dim[1], dim[2]/2+1);
dim3 threadDim(4, 4);
// REAL* buffer;
// REAL* tbuffer;
float time;
// tPtr -> Ptr
if (dir == BACKWARD) {
// size_t size = Ptr.pitch*dim[1] * dim[2];
// size_t pSize = Ptr.pitch*dim[1] * (dim[2]/2+1);
// size_t tSize = tPtr.pitch*(nx / 2 + 1)*ny;
// buffer = (REAL*)malloc(size);
// tbuffer = (REAL*)malloc(tSize);
// ASSERT(buffer != nullptr);
// ASSERT(tbuffer != nullptr);
//setZeros <<<1, threadDim >>> (Ptr, dim[0], dim[1], dim[2]);
//#ifdef DEBUG
// err = hipMemcpy(tbuffer, tPtr.ptr, tSize, hipMemcpyDeviceToHost);
// ASSERT(err == hipSuccess);
// err = hipDeviceSynchronize();
// ASSERT(err == hipSuccess);
// if(isOutput) RPCF::write_3d_to_file("beforeREV.txt", tbuffer, tPtr.pitch, 2 * dim[2], (dim[0] / 2 + 1), dim[1]);
//#endif //DEBUG
//chebyshev transform in z direction
cheby_s2p(tPtr, dim[0] / 2 + 1, dim[1] , dim[2], pd);
//transpose(dir, Ptr, tPtr, dim, tDim);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(start_trans);
#endif
cuCheck(myCudaMalloc(Ptr, XYZ_3D), "my hipMalloc");
cuda_transpose(dir, Ptr, tPtr, dim, tDim);
cuCheck(myCudaFree(tPtr, ZXY_3D), "my cuda free at transform");
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "transpose backward time = " << time / 1000.0 << std::endl;
hipEventRecord(start_trans);
#endif
setZeros((cuRPCF::complex*)Ptr.ptr, Ptr.pitch, dim3(dim[0], dim[1], dim[2]));
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "set zeros time = " << time / 1000.0 << std::endl;
hipEventRecord(start_trans);
#endif
void* dev_buffer = get_fft_buffer_ptr();
res = CUFFTEXEC_C2R(planXYc2r, (CUFFTCOMPLEX*)Ptr.ptr,
(CUFFTREAL*)Ptr.ptr);
//(CUFFTREAL*)dev_buffer);
//cuCheck(hipMemcpy(Ptr.ptr, dev_buffer, pSize, hipMemcpyDeviceToDevice),"mem move");
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "FFT XY BACKWARD TIME = " << time / 1000.0 << std::endl;
#endif
ASSERT(res == HIPFFT_SUCCESS);
err = hipDeviceSynchronize();
ASSERT(err == hipSuccess);
//#ifdef DEBUG
// err = hipMemcpy(buffer, Ptr.ptr, size, hipMemcpyDeviceToHost);
// ASSERT(err == hipSuccess);
// err = hipDeviceSynchronize();
// ASSERT(err == hipSuccess);
// if (isOutput) RPCF::write_3d_to_file("afterREV.txt", buffer, Ptr.pitch, 2 * (dim[0] / 2 + 1), dim[1], dim[2]);
//#endif //DEBUG
//#ifdef DEBUG
// err = hipMemcpy(buffer, Ptr.ptr, size, hipMemcpyDeviceToHost);
// ASSERT(err == hipSuccess);
// err = hipDeviceSynchronize();
// ASSERT(err == hipSuccess);
// if (isOutput) RPCF::write_3d_to_file("afterNORM.txt", buffer, Ptr.pitch, 2 * (dim[0] / 2 + 1), dim[1], dim[2]);
//#endif //DEBUG
}
else
{
// Ptr -> tPtr
// size_t size = Ptr.pitch*dim[1] * dim[2];
// size_t pSize = Ptr.pitch*dim[1] * (dim[2] / 2 + 1);
// size_t tSize = tPtr.pitch*(dim[0] / 2 + 1)*dim[1];
// buffer = (REAL*)malloc(size);
// tbuffer = (REAL*)malloc(tSize);
// ASSERT(buffer != nullptr);
// ASSERT(tbuffer != nullptr);
//ASSERT(err == hipSuccess);
//#ifdef DEBUG
// err = hipMemcpy(buffer, Ptr.ptr, size, hipMemcpyDeviceToHost);
// ASSERT(err == hipSuccess);
// err = hipDeviceSynchronize();
// ASSERT(err == hipSuccess);
// if (isOutput) RPCF::write_3d_to_file("before.txt", buffer, Ptr.pitch, 2*(dim[0]/2+1), dim[1], dim[2]);
//#endif //DEBUG
ASSERT(dir == FORWARD);
void* dev_buffer = get_fft_buffer_ptr();
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(start_trans);
#endif
res = CUFFTEXEC_R2C(planXYr2c, (CUFFTREAL*)Ptr.ptr,
(CUFFTCOMPLEX*)Ptr.ptr);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "FFT XY forward TIME = " << time / 1000.0 << std::endl;
#endif
//(CUFFTCOMPLEX*)dev_buffer);
//cuCheck(hipMemcpy(Ptr.ptr, dev_buffer, pSize, hipMemcpyDeviceToDevice), "mem move");
//#ifdef DEBUG
// err = hipMemcpy(buffer, Ptr.ptr, size, hipMemcpyDeviceToHost);
// ASSERT(err == hipSuccess);
// err = hipDeviceSynchronize();
// ASSERT(err == hipSuccess);
//
// if (isOutput) RPCF::write_3d_to_file("afterXY.txt", buffer, Ptr.pitch, 2 * (dim[0] / 2 + 1), dim[1], dim[2]);
//#endif // DEBUG
err = hipDeviceSynchronize();
ASSERT(err == hipSuccess);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(start_trans);
#endif
normalize(Ptr, dim3(dim[0], dim[1], dim[2]), 1.0 / dim[0] / dim[1]);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "normalize TIME = " << time / 1000.0 << std::endl;
//transpose(FORWARD, Ptr, tPtr, dim, tDim);
hipEventRecord(start_trans);
#endif
cuCheck(myCudaMalloc(tPtr, ZXY_3D), "my hipMalloc");
cuda_transpose(dir, Ptr, tPtr, dim, tDim);
cuCheck(myCudaFree(Ptr, XYZ_3D), "my cuda free at transform");
err = hipDeviceSynchronize();
ASSERT(err == hipSuccess);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "tranpose forward TIME = " << time / 1000.0 << std::endl;
#endif
//err = hipMemcpy(tbuffer, tPtr.ptr, tSize, hipMemcpyDeviceToHost);
//ASSERT(err == hipSuccess);
//err = hipDeviceSynchronize();
//ASSERT(err == hipSuccess);
//#ifdef DEBUG
// if (isOutput) RPCF::write_3d_to_file("Transposed.txt", tbuffer, tPtr.pitch, 2 * dim[2], (dim[0] / 2 + 1), dim[1]);
//#endif //DEBUG
//transform in z direction
cheby_p2s(tPtr, dim[0] / 2 + 1, dim[1], dim[2], pd);
//#ifdef DEBUG
// err = hipMemcpy(tbuffer, tPtr.ptr, tSize, hipMemcpyDeviceToHost);
// ASSERT(err == hipSuccess);
// err = hipDeviceSynchronize();
// ASSERT(err == hipSuccess);
// if (isOutput) RPCF::write_3d_to_file("afterZ.txt", tbuffer, tPtr.pitch, 2 * dim[2], (dim[0] / 2 + 1), dim[1]);
//#endif //DEBUG
//setZeros<<<1, threadDim >>>(Ptr, dim[0], dim[1], dim[2]);
//err = hipDeviceSynchronize();
//ASSERT(err == hipSuccess);
}
// free(buffer);
// free(tbuffer);
return 0;
}
__host__ int transform(DIRECTION dir, problem& pb) {
int indim[3];
int outdim[3];
indim[0] = pb.mx;
indim[1] = pb.my;
indim[2] = pb.mz;
outdim[0] = pb.mz;
outdim[1] = pb.mx;
outdim[2] = pb.my;
if (dir == BACKWARD) {
//transform_backward_X6(pb);
//return 0;
transform_3d_one(BACKWARD, pb.dptr_u, pb.dptr_tu, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_v, pb.dptr_tv, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_w, pb.dptr_tw, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_omega_x, pb.dptr_tomega_x, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_omega_y, pb.dptr_tomega_y, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_omega_z, pb.dptr_tomega_z, indim, outdim, Padding);
}
if (dir == FORWARD) {
//transform_forward_X3(pb);
//return 0;
transform_3d_one(FORWARD, pb.dptr_lamb_z, pb.dptr_tLamb_z, indim, outdim);
transform_3d_one(FORWARD, pb.dptr_lamb_y, pb.dptr_tLamb_y, indim, outdim);
transform_3d_one(FORWARD, pb.dptr_lamb_x, pb.dptr_tLamb_x, indim, outdim);
}
return 0;
}
//mx, my, mz is the size of large matrix
//nx, ny, nz is the size of the small matrix (dealiased)
__global__ void setZerosKernel(cuRPCF::complex* ptr,size_t pitch, int mx, int my, int mz) {
int ky = blockIdx.x;
int kz = blockIdx.y;
int kx = threadIdx.x;
if (ky >= my || kz >= mz/2+1 || kx>= mx/2+1) return;
assert(kx * sizeof(cuRPCF::complex) <= pitch);
size_t inc = pitch * (kz * my + ky)/sizeof(cuRPCF::complex);
ptr = ptr + inc;
int nx = mx / 3 * 2;
int ny = my / 3 * 2;
if (ky >= ny / 2 && ky <= my - (ny/2)) {
ptr[kx] = 0.0;
return;
}
else
{
if( kx >= nx/2 ) {
ptr[kx] = 0.0;
}
return;
}
}
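// setZerosKernel applies the 2/3-rule dealiasing mask with nx = 2*mx/3 and
// ny = 2*my/3: rows with ky in the middle band [ny/2, my-ny/2] are zeroed
// entirely, and in the remaining rows all modes with kx >= nx/2 are zeroed
// before the inverse x-y FFT.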
__host__ void setZeros(cuRPCF::complex* ptr, size_t pitch, dim3 dims) {
int dim[3] = { dims.x,dims.y,dims.z };
hipLaunchKernelGGL(( setZerosKernel) , dim3(dim3(dims.y,dims.z/2+1)), dim3(dims.x/2+1) , 0, 0, (cuRPCF::complex*)ptr, pitch,
dim[0], dim[1], dim[2]);
//#ifdef KERNEL_SYNCHRONIZED
cuCheck(hipDeviceSynchronize(), "set zeros");
//#endif
}
__global__ void normalizeKernel(REAL* ptr, size_t pitch , int mx, int my, int mz, REAL factor) {
const int iy = blockIdx.x;
const int iz = blockIdx.y;
const int ix = threadIdx.x;
//if (iy >= my || iz >= mz/2+1)return;
//const int ny = my / 3 * 2;
//if (iy > ny / 2 && iy < my - (ny/2)) return;
//if (ix >= mx) return;
size_t dist = pitch*(my*iz + iy) / sizeof(cuRPCF::complex);
cuRPCF::complex* row = ((cuRPCF::complex*)ptr) + dist;
row[ix] = row[ix] * factor;
}
__host__ void normalize(hipPitchedPtr Ptr, dim3 dims, REAL factor) {
hipError_t err;
int dim[3] = { dims.x,dims.y,dims.z };
dim3 nDim(dim[1], dim[2] / 2 + 1);
hipLaunchKernelGGL(( normalizeKernel), dim3(nDim), dim3(dim[0]/2+1), 0, 0, (REAL*)Ptr.ptr, Ptr.pitch, dim[0], dim[1], dim[2], factor);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
#endif
ASSERT(err == hipSuccess);
}
//preprocessing of chebyshev transform, spectral to physical
__global__ void cheby_pre_s2p_pad(cuRPCF::complex* u, const size_t pitch, const int hmx, const int my, const int mz) {
const int mx = (hmx-1)*2;
const int pz = mz / 2 + 1;
const int nz = mz / 4; //here, nz is the max index of z (start from 0)
const int hnx = mx / 3 * 2 / 2 + 1;
const int ny = my / 3 * 2;
const int ix = blockIdx.x;
const int iy = blockIdx.y;
if (ix >= hnx || iy >= ny)return;
const int iz = threadIdx.x;
if (iz > nz)return;
size_t dist = pitch*(hnx*iy + ix) / sizeof(cuRPCF::complex);
u = u + dist;
/*for (int i = nz; i < pz; i++) {
u[i].re = 0.0;
u[i].im = 0.0;
}*/
u[iz + nz + 1] = 0.0;
u[iz + pz - 1] = 0.0;
/*for (int i = 0; i < nz; i++) {
u[i].re = u[i].re*0.5;
u[i].im = u[i].im*0.5;
}*/
u[iz] = u[iz] * 0.5;
/*for (int i = 1; i < pz - 1; i++) {
u[mz - i].re = u[i].re;
u[mz - i].im = u[i].im;
}*/
if (iz == 0) {
u[0] = u[0] * 2.0;
}
else {
u[mz - iz] = u[iz];
}
}
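// cheby_pre_s2p_pad prepares the spectral-to-physical Chebyshev transform:
// coefficients are halved, the upper half of the z range is zero-padded, and the
// data is mirrored via u[mz-iz] = u[iz] so that the length-mz complex FFT below
// evaluates the Chebyshev (cosine) series on the physical grid.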
__global__ void cheby_pre_s2p_noPad(cuRPCF::complex* u, const size_t pitch, const int hmx, const int my, const int mz) {
const int mx = (hmx - 1) * 2;
const int pz = mz / 2 + 1;
const int nz = mz / 4; //here, nz is the max index of z (start from 0)
const int hnx = mx/ 3 * 2 / 2 + 1;
const int ny = my / 3 * 2;
const int ix = blockIdx.x;
const int iy = blockIdx.y;
const int iz = threadIdx.x;
if (ix >= hnx || iy >= ny)return;
size_t dist = pitch*(hnx*iy + ix) / sizeof(cuRPCF::complex);
u = u + dist;
//for (int i = nz; i < pz; i++) {
// u[i].re = 0.0;
// u[i].im = 0.0;
//}
int i = iz;
//for (int i = 0; i < nz; i++) {
if (i <= nz) {
u[i].re = u[i].re*0.5;
u[i].im = u[i].im*0.5;
}
__syncthreads();
//for (int i = 1; i < nz - 1; i++) {
if (i >= 1 && i <= nz - 1) {
u[pz - 1 - i].re = u[i].re;
u[pz - 1 - i].im = u[i].im;
}else if (i == 0 || i==nz) {
//}else if (i == 0) {
u[i].re = u[i].re*2.0;
u[i].im = u[i].im*2.0;
}
}
//preprocessing of chebyshev transform, physical to spectral
__global__ void cheby_pre_p2s(cuRPCF::complex* u, const size_t pitch, const int hmx, const int my, const int mz) {
const int mx = (hmx - 1) * 2;
const int pz = mz / 2 + 1;
// const int nz = mz / 4; //here, nz is the max index of z (start from 0)
const int hnx = mx / 3 * 2 / 2 + 1;
const int ny = my / 3 * 2;
const int ix = blockIdx.x;
const int iy = blockIdx.y;
const int iz = threadIdx.x;
if (ix >= hnx || iy >= ny)return;
if (iz >= pz - 1) return;
if (iz == 0)return;
size_t dist = pitch*(hnx*iy + ix) / sizeof(cuRPCF::complex);
u = u + dist;
u[mz - iz].re = u[iz].re;
u[mz - iz].im = u[iz].im;
}
//post-processing of chebyshev transform, physical to spectral
__global__ void cheby_post_p2s(cuRPCF::complex* u, const size_t pitch, const int hmx, const int my, const int mz) {
const int mx = (hmx - 1) * 2;
const int pz = mz / 2 + 1;
//const int nz = mz / 4; //here, nz is the max index of z (start from 0)
const int hnx = mx/ 3 * 2 /2 + 1;
const int ny = my / 3 * 2;
const int ix = blockIdx.x;
const int iy = blockIdx.y;
if (ix >= hnx || iy >= ny)return;
const int iz = threadIdx.x;
if (iz >= pz)return;
size_t dist = pitch*(hnx*iy + ix) / sizeof(cuRPCF::complex);
u = u + dist;
REAL factor = (1.0 / (pz - 1));
u[iz].re = u[iz].re*factor;
u[iz].im = u[iz].im*factor;
if (iz == 0 || iz == pz-1) {
u[iz].re = u[iz].re*0.5;
u[iz].im = u[iz].im*0.5;
}
}
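// Physical-to-spectral direction: cheby_pre_p2s builds the even mirror extension,
// the forward complex FFT is applied, and cheby_post_p2s rescales by 1/(pz-1) and
// halves the two endpoint values, recovering the Chebyshev coefficients from the
// FFT of the mirrored data.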
__host__ void cheby_p2s(hipPitchedPtr tPtr, int hmx, int my, int mz, Padding_mode pad) {
// const size_t pitch = tPtr.pitch;
const int nx = (hmx - 1) * 2 / 3 * 2;
const int ny = my / 3 * 2;
const int hnx = nx / 2 + 1;
//int threadDimx = 16;
//int threadDimy = 16;
//int blockDimx = hnx / threadDimx;
//int blockDimy = ny / threadDimy;
//if (hnx%threadDimx != 0) blockDimx++;
//if (ny%threadDimy != 0) blockDimy++;
//dim3 nthread(threadDimx, threadDimy);
//dim3 nBlock(blockDimx, blockDimy);
hipfftResult res;
hipError_t err;
float time;
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(start_trans);
#endif
// Transform with dealiasing
if (pad == Padding) {
hipLaunchKernelGGL(cheby_pre_p2s, dim3(hnx, ny), dim3(mz / 2 + 1), 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_pre_p2s_time = " << time / 1000.0 << std::endl;
hipEventRecord(start_trans);
#endif
res = CUFFTEXEC_C2C(planZ_pad, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, HIPFFT_FORWARD);
assert(res == HIPFFT_SUCCESS);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby fft p2s time = " << time / 1000.0 << std::endl;
//err = hipDeviceSynchronize();
//assert(err == hipSuccess);
hipEventRecord(start_trans);
#endif
hipLaunchKernelGGL(cheby_post_p2s, dim3(hnx, ny), dim3(mz / 2 + 1), 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_post_p2s_time = " << time / 1000.0 << std::endl;
#endif
}
else //Transform without dealiasing
{
{
hipLaunchKernelGGL(cheby_pre_p2s, dim3(hnx, ny), dim3(mz / 4 + 1), 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz/2);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_pre_p2s_time = " << time / 1000.0 << std::endl;
hipEventRecord(start_trans);
#endif
res = CUFFTEXEC_C2C(planZ_no_pad, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, HIPFFT_FORWARD);
assert(res == HIPFFT_SUCCESS);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby fft p2s time = " << time / 1000.0 << std::endl;
//err = hipDeviceSynchronize();
//assert(err == hipSuccess);
hipEventRecord(start_trans);
#endif
hipLaunchKernelGGL(cheby_post_p2s, dim3(hnx, ny), dim3(mz / 4 + 1), 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz/2);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_post_p2s_time = " << time / 1000.0 << std::endl;
#endif
}
}
}
//spectral to physical chebyshev transform in wall-normal direction
__host__ void cheby_s2p(hipPitchedPtr tPtr, int hmx, int my, int mz, Padding_mode doPadding) {
// const size_t pitch = tPtr.pitch;
// const int pz = mz / 2 + 1;
const int nx = (hmx-1)*2/3*2;
const int ny = my/3*2;
const int hnx = nx/2+1;
hipfftResult res;
hipError_t err;
float time;
if(doPadding == Padding){
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(start_trans);
#endif
hipLaunchKernelGGL(cheby_pre_s2p_pad, dim3(hnx,ny), dim3(mz/4+1), 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_pre_s2p_pad_time = " << time / 1000.0 << std::endl;
hipEventRecord(start_trans);
#endif
res = CUFFTEXEC_C2C(planZ_pad, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, HIPFFT_FORWARD);
ASSERT(res == HIPFFT_SUCCESS);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby fft s2p padding time = " << time / 1000.0 << std::endl;
#endif
//err = hipDeviceSynchronize();
//ASSERT(err == hipSuccess);
}
else if(doPadding == No_Padding)
{
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(start_trans);
#endif
hipLaunchKernelGGL(( cheby_pre_s2p_noPad), dim3(dim3(hnx,ny)), dim3(mz/4) , 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_pre_s2p_nopad_time = " << time / 1000.0 << std::endl;
hipEventRecord(start_trans);
#endif
res = CUFFTEXEC_C2C(planZ_no_pad, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, HIPFFT_FORWARD);
ASSERT(res == HIPFFT_SUCCESS);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
hipEventRecord(end_trans);
hipEventSynchronize(end_trans);
hipEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby fft s2p no pad time = " << time / 1000.0 << std::endl;
#endif
//err = hipDeviceSynchronize();
//ASSERT(err == hipSuccess);
}
else
{
assert(false);
}
}
__host__ void transform_backward_X6(problem& pb) {
int dim[3] = { pb.mx,pb.my,pb.mz };
int tDim[3] = { pb.mz,pb.mx,pb.my };
cheby_s2p_X6(pb.dptr_tu, dim[0] / 2 + 1, dim[1], dim[2]);
//transpose(dir, Ptr, tPtr, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_u, pb.dptr_tu, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_v, pb.dptr_tv, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_w, pb.dptr_tw, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_omega_x, pb.dptr_tomega_x, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_omega_y, pb.dptr_tomega_y, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_omega_z, pb.dptr_tomega_z, dim, tDim);
hipPitchedPtr& Ptr = pb.dptr_u;
int nThreadx = 16;
int nThready = 16;
dim3 nThread(nThreadx, nThready);
int nDimx = dim[1] / nThreadx;
int nDimy = (dim[2] / 2 + 1)*6 / nThready;
if (dim[1] % nThreadx != 0) nDimx++;
if ((dim[2] / 2 + 1)*6 % nThready != 0) nDimy++;
dim3 nDim(nDimx, nDimy);
hipLaunchKernelGGL(( setZerosKernel), dim3(nDim), dim3(nThread) , 0, 0, (cuRPCF::complex*)Ptr.ptr, Ptr.pitch,
dim[0], dim[1], dim[2]*6);
#ifdef KERNEL_SYNCHRONIZED
cuCheck(hipDeviceSynchronize(), "set zeros");
#endif
hipfftResult_t res;
res = CUFFTEXEC_C2R(planXYc2r_X6, (CUFFTCOMPLEX*)pb.dptr_u.ptr,
(CUFFTREAL*)pb.dptr_u.ptr);
ASSERT(res == HIPFFT_SUCCESS);
cuCheck(hipDeviceSynchronize(),"fft");
}
__host__ void transform_forward_X3(problem& pb) {
hipfftResult_t res;
hipPitchedPtr Ptr = pb.dptr_lamb_x;
res = CUFFTEXEC_R2C(planXYr2c_X3, (CUFFTREAL*)Ptr.ptr,
(CUFFTCOMPLEX*)Ptr.ptr);
int dim[3] = { pb.mx, pb.my, pb.mz };
int tDim[3] = { pb.mz, pb.mx, pb.my };
//normalize;
int nthreadx = 16;
int nthready = 16;
int nDimx = dim[1] / nthreadx;
int nDimy = (dim[2] / 2 + 1) * 3/ nthready;
if (dim[1] % nthreadx != 0) nDimx++;
if ((dim[2] / 2 + 1)*3 % nthready != 0) nDimy++;
dim3 dim_num(nDimx, nDimy);
dim3 thread_num(nthreadx, nthready);
// THIS LAUNCH PARAMETER NEED TO BE CHANGED
hipLaunchKernelGGL(( normalizeKernel), dim3(dim_num), dim3(thread_num) , 0, 0,
(REAL*)Ptr.ptr, Ptr.pitch, dim[0], dim[1], dim[2]*3, 1.0 / dim[0] / dim[1]);
cuCheck(hipDeviceSynchronize(),"normalize X3");
cuda_transpose(FORWARD, pb.dptr_lamb_z, pb.dptr_tLamb_z, dim, tDim);
cuda_transpose(FORWARD, pb.dptr_lamb_y, pb.dptr_tLamb_y, dim, tDim);
cuda_transpose(FORWARD, pb.dptr_lamb_x, pb.dptr_tLamb_x, dim, tDim);
cheby_p2s_X3(pb.dptr_tLamb_x, dim[0] / 2 + 1, dim[1], dim[2]);
}
__host__ void cheby_p2s_X3(hipPitchedPtr tPtr, int hmx, int my, int mz) {
// const size_t pitch = tPtr.pitch;
const int nx = (hmx - 1) * 2 / 3 * 2;
const int ny = my / 3 * 2;
const int hnx = nx / 2 + 1;
int threadDimx = 16;
int threadDimy = 16;
int blockDimx = hnx / threadDimx;
int blockDimy = ny*3 / threadDimy;
if (hnx%threadDimx != 0) blockDimx++;
if (ny*3%threadDimy != 0) blockDimy++;
dim3 nthread(threadDimx, threadDimy);
dim3 nBlock(blockDimx, blockDimy);
hipfftResult res;
hipError_t err;
hipLaunchKernelGGL(cheby_pre_p2s, nBlock, nthread, 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my*3, mz);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
res = CUFFTEXEC_C2C(planZ_X3, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, HIPFFT_FORWARD);
assert(res == HIPFFT_SUCCESS);
//err = hipDeviceSynchronize();
//assert(err == hipSuccess);
hipLaunchKernelGGL(cheby_post_p2s, nBlock, nthread, 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my*3, mz);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
}
__host__ void cheby_s2p_X6(hipPitchedPtr tPtr, int hmx, int my, int mz) {
// const size_t pitch = tPtr.pitch;
//const int pz = mz / 2 + 1;
const int nx = (hmx - 1) * 2 / 3 * 2;
const int ny = my / 3 * 2;
const int hnx = nx / 2 + 1;
int threadDimx = 16;
int threadDimy = 16;
int blockDimx = hnx / threadDimx;
int blockDimy = 6*ny / threadDimy;
if (hnx%threadDimx != 0) blockDimx++;
if (6*ny%threadDimy != 0) blockDimy++;
dim3 nthread(threadDimx, threadDimy);
dim3 nBlock(blockDimx, blockDimy);
hipfftResult res;
hipError_t err;
hipLaunchKernelGGL(( cheby_pre_s2p_pad) , dim3(nBlock), dim3(nthread) , 0, 0, (cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, 6*my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = hipDeviceSynchronize();
assert(err == hipSuccess);
#endif
res = CUFFTEXEC_C2C(planZ_X6, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, HIPFFT_FORWARD);
ASSERT(res == HIPFFT_SUCCESS);
//err = hipDeviceSynchronize();
//ASSERT(err == hipSuccess);
} | efec8d5efa9db25a6f7c11d3400af085307afbdf.cu | #include "../include/transform.cuh"
#include <malloc.h>
#include <assert.h>
#include <stdio.h>
#include "../include/operation.h"
#include "../include/cuRPCF.h"
#include <omp.h>
#include "../include/transpose.cuh"
#include <iostream>
cufftHandle planXYr2c, planXYc2r, planZ_pad, planZ_no_pad;
cufftHandle planXYr2c_X3, planXYc2r_X6, planZ_X6, planZ_X3;
#define KERNEL_SYNCHRONIZED
cudaEvent_t start_trans, end_trans;
__host__ int initFFT(problem &pb) {
cufftResult res;
const int mx = pb.mx;
const int my = pb.my;
const int mz = pb.mz;
const int inPitch = pb.pitch;
const int outPitch = pb.tPitch;
const int pmx = inPitch / sizeof(REAL);
const int pmz = outPitch / sizeof(cuRPCF::complex);
const int nx = mx / 3 * 2;
const int ny = my / 3 * 2;
const int istride = 1;
int inembed[2] = { my, pmx };
int idist = pmx*my;
int inembed2[2] = { my,pmx / 2 };
int idist2 = pmx / 2 * my;
int dim2[2] = { my,mx };
int dim1[1] = { mz };
int onembed[1] = { pmz };
const int odist = pmz;
const int ostride = 1;
int dim1_no_pad[1] = { mz / 2 };
//cufftPlanMany( plan *, int dim, int* n, int* inembed, int istride, int idist
// int* onembed, int ostride, int odist, cufftType, int batch);
res = cufftPlanMany(&planXYr2c, 2, dim2, inembed, istride, idist,
inembed2, istride, idist2, myCUFFT_R2C, pb.pz);
if (!(res == CUFFT_SUCCESS)) {
std::cerr << "[ERROR]:plan create failed!" << std::endl;
};
res = cufftPlanMany(&planXYc2r, 2, dim2, inembed2, istride, idist2,
inembed, istride, idist, myCUFFT_C2R, pb.pz);
if (!(res == CUFFT_SUCCESS)) {
std::cerr << "[ERROR]:plan create failed!" << std::endl;
};
res = cufftPlanMany(&planZ_pad, 1, dim1, onembed, ostride, odist,
onembed, ostride, odist, myCUFFT_C2C, (nx/2+1)*ny);
if (!(res == CUFFT_SUCCESS)) {
std::cerr << "[ERROR]:plan create failed!" << std::endl;
};
res = cufftPlanMany(&planZ_no_pad, 1, dim1_no_pad, onembed, ostride, odist,
onembed, ostride, odist, myCUFFT_C2C, (nx/2+1)*ny);
if (!(res == CUFFT_SUCCESS)) {
std::cerr << "[ERROR]:plan create failed !" << std::endl;
};
//res = cufftPlanMany(&planXYr2c_X3, 2, dim2, inembed, istride, idist,
// inembed2, istride, idist2, myCUFFT_R2C, pb.pz*3);
//assert(res == CUFFT_SUCCESS);
//res = cufftPlanMany(&planXYc2r_X6, 2, dim2, inembed2, istride, idist2,
// inembed, istride, idist, myCUFFT_C2R, pb.pz*6);
//res = cufftPlanMany(&planZ_X3, 1, dim1, onembed, ostride, odist,
// onembed, ostride, odist, myCUFFT_C2C, (nx / 2 + 1)*ny*3);
//res = cufftPlanMany(&planZ_X6, 1, dim1, onembed, ostride, odist,
// onembed, ostride, odist, myCUFFT_C2C, (nx / 2 + 1)*ny * 6);
assert(res == CUFFT_SUCCESS);
assert(res == CUFFT_SUCCESS);
cudaEventCreate(&start_trans);
cudaEventCreate(&end_trans);
return 0;
}
__host__ int transform_3d_one(DIRECTION dir, cudaPitchedPtr& Ptr,
cudaPitchedPtr& tPtr, int* dim, int* tDim,
Padding_mode pd, bool isOutput) {
//transform in x-y direction
cufftResult res;
cudaExtent extent = make_cudaExtent(
2*(dim[0]/2+1) * sizeof(REAL), dim[1], dim[2]);
cudaError_t err;
ASSERT(dim[0] == tDim[1]);
ASSERT(dim[1] == tDim[2]);
ASSERT(dim[2] == tDim[0]);
int nx = dim[0] / 3 * 2;
int ny = dim[1] / 3 * 2;
cudaExtent tExtent = make_cudaExtent(
tDim[0] * sizeof(cuRPCF::complex), nx/2+1 , ny);
cudaExtent pExtent = make_cudaExtent(
2 * (dim[0] / 2 + 1) * sizeof(REAL), dim[1], dim[2]/2+1);
dim3 threadDim(4, 4);
// REAL* buffer;
// REAL* tbuffer;
float time;
// tPtr -> Ptr
if (dir == BACKWARD) {
// size_t size = Ptr.pitch*dim[1] * dim[2];
// size_t pSize = Ptr.pitch*dim[1] * (dim[2]/2+1);
// size_t tSize = tPtr.pitch*(nx / 2 + 1)*ny;
// buffer = (REAL*)malloc(size);
// tbuffer = (REAL*)malloc(tSize);
// ASSERT(buffer != nullptr);
// ASSERT(tbuffer != nullptr);
//setZeros <<<1, threadDim >>> (Ptr, dim[0], dim[1], dim[2]);
//#ifdef DEBUG
// err = cudaMemcpy(tbuffer, tPtr.ptr, tSize, cudaMemcpyDeviceToHost);
// ASSERT(err == cudaSuccess);
// err = cudaDeviceSynchronize();
// ASSERT(err == cudaSuccess);
// if(isOutput) RPCF::write_3d_to_file("beforeREV.txt", tbuffer, tPtr.pitch, 2 * dim[2], (dim[0] / 2 + 1), dim[1]);
//#endif //DEBUG
//chebyshev transform in z direction
cheby_s2p(tPtr, dim[0] / 2 + 1, dim[1] , dim[2], pd);
//transpose(dir, Ptr, tPtr, dim, tDim);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(start_trans);
#endif
cuCheck(myCudaMalloc(Ptr, XYZ_3D), "my cudaMalloc");
cuda_transpose(dir, Ptr, tPtr, dim, tDim);
cuCheck(myCudaFree(tPtr, ZXY_3D), "my cuda free at transform");
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "transpose backward time = " << time / 1000.0 << std::endl;
cudaEventRecord(start_trans);
#endif
setZeros((cuRPCF::complex*)Ptr.ptr, Ptr.pitch, dim3(dim[0], dim[1], dim[2]));
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "set zeros time = " << time / 1000.0 << std::endl;
cudaEventRecord(start_trans);
#endif
void* dev_buffer = get_fft_buffer_ptr();
res = CUFFTEXEC_C2R(planXYc2r, (CUFFTCOMPLEX*)Ptr.ptr,
(CUFFTREAL*)Ptr.ptr);
//(CUFFTREAL*)dev_buffer);
//cuCheck(cudaMemcpy(Ptr.ptr, dev_buffer, pSize, cudaMemcpyDeviceToDevice),"mem move");
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "FFT XY BACKWARD TIME = " << time / 1000.0 << std::endl;
#endif
ASSERT(res == CUFFT_SUCCESS);
err = cudaDeviceSynchronize();
ASSERT(err == cudaSuccess);
//#ifdef DEBUG
// err = cudaMemcpy(buffer, Ptr.ptr, size, cudaMemcpyDeviceToHost);
// ASSERT(err == cudaSuccess);
// err = cudaDeviceSynchronize();
// ASSERT(err == cudaSuccess);
// if (isOutput) RPCF::write_3d_to_file("afterREV.txt", buffer, Ptr.pitch, 2 * (dim[0] / 2 + 1), dim[1], dim[2]);
//#endif //DEBUG
//#ifdef DEBUG
// err = cudaMemcpy(buffer, Ptr.ptr, size, cudaMemcpyDeviceToHost);
// ASSERT(err == cudaSuccess);
// err = cudaDeviceSynchronize();
// ASSERT(err == cudaSuccess);
// if (isOutput) RPCF::write_3d_to_file("afterNORM.txt", buffer, Ptr.pitch, 2 * (dim[0] / 2 + 1), dim[1], dim[2]);
//#endif //DEBUG
}
else
{
// Ptr -> tPtr
// size_t size = Ptr.pitch*dim[1] * dim[2];
// size_t pSize = Ptr.pitch*dim[1] * (dim[2] / 2 + 1);
// size_t tSize = tPtr.pitch*(dim[0] / 2 + 1)*dim[1];
// buffer = (REAL*)malloc(size);
// tbuffer = (REAL*)malloc(tSize);
// ASSERT(buffer != nullptr);
// ASSERT(tbuffer != nullptr);
//ASSERT(err == cudaSuccess);
//#ifdef DEBUG
// err = cudaMemcpy(buffer, Ptr.ptr, size, cudaMemcpyDeviceToHost);
// ASSERT(err == cudaSuccess);
// err = cudaDeviceSynchronize();
// ASSERT(err == cudaSuccess);
// if (isOutput) RPCF::write_3d_to_file("before.txt", buffer, Ptr.pitch, 2*(dim[0]/2+1), dim[1], dim[2]);
//#endif //DEBUG
ASSERT(dir == FORWARD);
void* dev_buffer = get_fft_buffer_ptr();
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(start_trans);
#endif
res = CUFFTEXEC_R2C(planXYr2c, (CUFFTREAL*)Ptr.ptr,
(CUFFTCOMPLEX*)Ptr.ptr);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "FFT XY forward TIME = " << time / 1000.0 << std::endl;
#endif
//(CUFFTCOMPLEX*)dev_buffer);
//cuCheck(cudaMemcpy(Ptr.ptr, dev_buffer, pSize, cudaMemcpyDeviceToDevice), "mem move");
//#ifdef DEBUG
// err = cudaMemcpy(buffer, Ptr.ptr, size, cudaMemcpyDeviceToHost);
// ASSERT(err == cudaSuccess);
// err = cudaDeviceSynchronize();
// ASSERT(err == cudaSuccess);
//
// if (isOutput) RPCF::write_3d_to_file("afterXY.txt", buffer, Ptr.pitch, 2 * (dim[0] / 2 + 1), dim[1], dim[2]);
//#endif // DEBUG
err = cudaDeviceSynchronize();
ASSERT(err == cudaSuccess);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(start_trans);
#endif
normalize(Ptr, dim3(dim[0], dim[1], dim[2]), 1.0 / dim[0] / dim[1]);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "normalize TIME = " << time / 1000.0 << std::endl;
//transpose(FORWARD, Ptr, tPtr, dim, tDim);
cudaEventRecord(start_trans);
#endif
cuCheck(myCudaMalloc(tPtr, ZXY_3D), "my cudaMalloc");
cuda_transpose(dir, Ptr, tPtr, dim, tDim);
cuCheck(myCudaFree(Ptr, XYZ_3D), "my cuda free at transform");
err = cudaDeviceSynchronize();
ASSERT(err == cudaSuccess);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "tranpose forward TIME = " << time / 1000.0 << std::endl;
#endif
//err = cudaMemcpy(tbuffer, tPtr.ptr, tSize, cudaMemcpyDeviceToHost);
//ASSERT(err == cudaSuccess);
//err = cudaDeviceSynchronize();
//ASSERT(err == cudaSuccess);
//#ifdef DEBUG
// if (isOutput) RPCF::write_3d_to_file("Transposed.txt", tbuffer, tPtr.pitch, 2 * dim[2], (dim[0] / 2 + 1), dim[1]);
//#endif //DEBUG
//transform in z direction
cheby_p2s(tPtr, dim[0] / 2 + 1, dim[1], dim[2], pd);
//#ifdef DEBUG
// err = cudaMemcpy(tbuffer, tPtr.ptr, tSize, cudaMemcpyDeviceToHost);
// ASSERT(err == cudaSuccess);
// err = cudaDeviceSynchronize();
// ASSERT(err == cudaSuccess);
// if (isOutput) RPCF::write_3d_to_file("afterZ.txt", tbuffer, tPtr.pitch, 2 * dim[2], (dim[0] / 2 + 1), dim[1]);
//#endif //DEBUG
//setZeros<<<1, threadDim >>>(Ptr, dim[0], dim[1], dim[2]);
//err = cudaDeviceSynchronize();
//ASSERT(err == cudaSuccess);
}
// free(buffer);
// free(tbuffer);
return 0;
}
__host__ int transform(DIRECTION dir, problem& pb) {
int indim[3];
int outdim[3];
indim[0] = pb.mx;
indim[1] = pb.my;
indim[2] = pb.mz;
outdim[0] = pb.mz;
outdim[1] = pb.mx;
outdim[2] = pb.my;
if (dir == BACKWARD) {
//transform_backward_X6(pb);
//return 0;
transform_3d_one(BACKWARD, pb.dptr_u, pb.dptr_tu, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_v, pb.dptr_tv, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_w, pb.dptr_tw, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_omega_x, pb.dptr_tomega_x, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_omega_y, pb.dptr_tomega_y, indim, outdim, Padding);
transform_3d_one(BACKWARD, pb.dptr_omega_z, pb.dptr_tomega_z, indim, outdim, Padding);
}
if (dir == FORWARD) {
//transform_forward_X3(pb);
//return 0;
transform_3d_one(FORWARD, pb.dptr_lamb_z, pb.dptr_tLamb_z, indim, outdim);
transform_3d_one(FORWARD, pb.dptr_lamb_y, pb.dptr_tLamb_y, indim, outdim);
transform_3d_one(FORWARD, pb.dptr_lamb_x, pb.dptr_tLamb_x, indim, outdim);
}
return 0;
}
//mx, my, mz is the size of large matrix
//nx, ny, nz is the size of the small matrix (dealiased)
__global__ void setZerosKernel(cuRPCF::complex* ptr,size_t pitch, int mx, int my, int mz) {
int ky = blockIdx.x;
int kz = blockIdx.y;
int kx = threadIdx.x;
if (ky >= my || kz >= mz/2+1 || kx>= mx/2+1) return;
assert(kx * sizeof(cuRPCF::complex) <= pitch);
size_t inc = pitch * (kz * my + ky)/sizeof(cuRPCF::complex);
ptr = ptr + inc;
int nx = mx / 3 * 2;
int ny = my / 3 * 2;
if (ky >= ny / 2 && ky <= my - (ny/2)) {
ptr[kx] = 0.0;
return;
}
else
{
if( kx >= nx/2 ) {
ptr[kx] = 0.0;
}
return;
}
}
__host__ void setZeros(cuRPCF::complex* ptr, size_t pitch, dim3 dims) {
int dim[3] = { dims.x,dims.y,dims.z };
setZerosKernel <<<dim3(dims.y,dims.z/2+1), dims.x/2+1 >>>((cuRPCF::complex*)ptr, pitch,
dim[0], dim[1], dim[2]);
//#ifdef KERNEL_SYNCHRONIZED
cuCheck(cudaDeviceSynchronize(), "set zeros");
//#endif
}
__global__ void normalizeKernel(REAL* ptr, size_t pitch , int mx, int my, int mz, REAL factor) {
const int iy = blockIdx.x;
const int iz = blockIdx.y;
const int ix = threadIdx.x;
//if (iy >= my || iz >= mz/2+1)return;
//const int ny = my / 3 * 2;
//if (iy > ny / 2 && iy < my - (ny/2)) return;
//if (ix >= mx) return;
size_t dist = pitch*(my*iz + iy) / sizeof(cuRPCF::complex);
cuRPCF::complex* row = ((cuRPCF::complex*)ptr) + dist;
row[ix] = row[ix] * factor;
}
__host__ void normalize(cudaPitchedPtr Ptr, dim3 dims, REAL factor) {
cudaError_t err;
int dim[3] = { dims.x,dims.y,dims.z };
dim3 nDim(dim[1], dim[2] / 2 + 1);
normalizeKernel<<<nDim, dim[0]/2+1>>> ((REAL*)Ptr.ptr, Ptr.pitch, dim[0], dim[1], dim[2], factor);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
#endif
ASSERT(err == cudaSuccess);
}
//preprocessing of chebyshev transform, spectral to physical
__global__ void cheby_pre_s2p_pad(cuRPCF::complex* u, const size_t pitch, const int hmx, const int my, const int mz) {
const int mx = (hmx-1)*2;
const int pz = mz / 2 + 1;
const int nz = mz / 4; //here, nz is the max index of z (start from 0)
const int hnx = mx / 3 * 2 / 2 + 1;
const int ny = my / 3 * 2;
const int ix = blockIdx.x;
const int iy = blockIdx.y;
if (ix >= hnx || iy >= ny)return;
const int iz = threadIdx.x;
if (iz > nz)return;
size_t dist = pitch*(hnx*iy + ix) / sizeof(cuRPCF::complex);
u = u + dist;
/*for (int i = nz; i < pz; i++) {
u[i].re = 0.0;
u[i].im = 0.0;
}*/
u[iz + nz + 1] = 0.0;
u[iz + pz - 1] = 0.0;
/*for (int i = 0; i < nz; i++) {
u[i].re = u[i].re*0.5;
u[i].im = u[i].im*0.5;
}*/
u[iz] = u[iz] * 0.5;
/*for (int i = 1; i < pz - 1; i++) {
u[mz - i].re = u[i].re;
u[mz - i].im = u[i].im;
}*/
if (iz == 0) {
u[0] = u[0] * 2.0;
}
else {
u[mz - iz] = u[iz];
}
}
__global__ void cheby_pre_s2p_noPad(cuRPCF::complex* u, const size_t pitch, const int hmx, const int my, const int mz) {
const int mx = (hmx - 1) * 2;
const int pz = mz / 2 + 1;
const int nz = mz / 4; //here, nz is the max index of z (start from 0)
const int hnx = mx/ 3 * 2 / 2 + 1;
const int ny = my / 3 * 2;
const int ix = blockIdx.x;
const int iy = blockIdx.y;
const int iz = threadIdx.x;
if (ix >= hnx || iy >= ny)return;
size_t dist = pitch*(hnx*iy + ix) / sizeof(cuRPCF::complex);
u = u + dist;
//for (int i = nz; i < pz; i++) {
// u[i].re = 0.0;
// u[i].im = 0.0;
//}
int i = iz;
//for (int i = 0; i < nz; i++) {
if (i <= nz) {
u[i].re = u[i].re*0.5;
u[i].im = u[i].im*0.5;
}
__syncthreads();
//for (int i = 1; i < nz - 1; i++) {
if (i >= 1 && i <= nz - 1) {
u[pz - 1 - i].re = u[i].re;
u[pz - 1 - i].im = u[i].im;
}else if (i == 0 || i==nz) {
//}else if (i == 0) {
u[i].re = u[i].re*2.0;
u[i].im = u[i].im*2.0;
}
}
//preprocessing of chebyshev transform, physical to spectral
__global__ void cheby_pre_p2s(cuRPCF::complex* u, const size_t pitch, const int hmx, const int my, const int mz) {
const int mx = (hmx - 1) * 2;
const int pz = mz / 2 + 1;
// const int nz = mz / 4; //here, nz is the max index of z (start from 0)
const int hnx = mx / 3 * 2 / 2 + 1;
const int ny = my / 3 * 2;
const int ix = blockIdx.x;
const int iy = blockIdx.y;
const int iz = threadIdx.x;
if (ix >= hnx || iy >= ny)return;
if (iz >= pz - 1) return;
if (iz == 0)return;
size_t dist = pitch*(hnx*iy + ix) / sizeof(cuRPCF::complex);
u = u + dist;
u[mz - iz].re = u[iz].re;
u[mz - iz].im = u[iz].im;
}
//post-processing of chebyshev transform, physical to spectral
__global__ void cheby_post_p2s(cuRPCF::complex* u, const size_t pitch, const int hmx, const int my, const int mz) {
const int mx = (hmx - 1) * 2;
const int pz = mz / 2 + 1;
//const int nz = mz / 4; //here, nz is the max index of z (start from 0)
const int hnx = mx/ 3 * 2 /2 + 1;
const int ny = my / 3 * 2;
const int ix = blockIdx.x;
const int iy = blockIdx.y;
if (ix >= hnx || iy >= ny)return;
const int iz = threadIdx.x;
if (iz >= pz)return;
size_t dist = pitch*(hnx*iy + ix) / sizeof(cuRPCF::complex);
u = u + dist;
REAL factor = (1.0 / (pz - 1));
u[iz].re = u[iz].re*factor;
u[iz].im = u[iz].im*factor;
if (iz == 0 || iz == pz-1) {
u[iz].re = u[iz].re*0.5;
u[iz].im = u[iz].im*0.5;
}
}
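/*
 * Note: cheby_pre_p2s (mirror the physical values so the sequence along z is
 * even), the complex FFT, and cheby_post_p2s (scale every mode by 1/(pz-1) and
 * halve the k = 0 and k = pz-1 modes) together amount to a cosine transform in
 * z, which is the usual way of obtaining Chebyshev coefficients from values at
 * the Gauss-Lobatto points; the exact normalization stated here is read off the
 * code above rather than taken from a reference.
 */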
__host__ void cheby_p2s(cudaPitchedPtr tPtr, int hmx, int my, int mz, Padding_mode pad) {
// const size_t pitch = tPtr.pitch;
const int nx = (hmx - 1) * 2 / 3 * 2;
const int ny = my / 3 * 2;
const int hnx = nx / 2 + 1;
//int threadDimx = 16;
//int threadDimy = 16;
//int blockDimx = hnx / threadDimx;
//int blockDimy = ny / threadDimy;
//if (hnx%threadDimx != 0) blockDimx++;
//if (ny%threadDimy != 0) blockDimy++;
//dim3 nthread(threadDimx, threadDimy);
//dim3 nBlock(blockDimx, blockDimy);
cufftResult res;
cudaError_t err;
float time;
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(start_trans);
#endif
// Transform with dealiasing
if (pad == Padding) {
cheby_pre_p2s <<<dim3(hnx, ny), mz / 2 + 1>>> ((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_pre_p2s_time = " << time / 1000.0 << std::endl;
cudaEventRecord(start_trans);
#endif
res = CUFFTEXEC_C2C(planZ_pad, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, CUFFT_FORWARD);
assert(res == CUFFT_SUCCESS);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby fft p2s time = " << time / 1000.0 << std::endl;
//err = cudaDeviceSynchronize();
//assert(err == cudaSuccess);
cudaEventRecord(start_trans);
#endif
cheby_post_p2s <<<dim3(hnx, ny), mz / 2 + 1>>> ((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_post_p2s_time = " << time / 1000.0 << std::endl;
#endif
}
else //Transform without dealiasing
{
cheby_pre_p2s <<<dim3(hnx, ny), mz / 4 + 1>>> ((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz/2);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_pre_p2s_time = " << time / 1000.0 << std::endl;
cudaEventRecord(start_trans);
#endif
res = CUFFTEXEC_C2C(planZ_no_pad, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, CUFFT_FORWARD);
assert(res == CUFFT_SUCCESS);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby fft p2s time = " << time / 1000.0 << std::endl;
//err = cudaDeviceSynchronize();
//assert(err == cudaSuccess);
cudaEventRecord(start_trans);
#endif
cheby_post_p2s <<<dim3(hnx, ny), mz / 4 + 1>>> ((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz/2);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_post_p2s_time = " << time / 1000.0 << std::endl;
#endif
}
}
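/*
 * Usage sketch (hypothetical call site; assumes the z-major layout produced by
 * cuda_transpose):
 *
 *     cuda_transpose(FORWARD, pb.dptr_lamb_x, pb.dptr_tLamb_x, dim, tDim);
 *     cheby_p2s(pb.dptr_tLamb_x, pb.mx / 2 + 1, pb.my, pb.mz, Padding);
 *
 * converts the wall-normal direction of that field in place from physical
 * values to Chebyshev coefficients (transform_forward_X3 below uses the batched
 * variant cheby_p2s_X3 for the same purpose).
 */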
//spectral to physical chebyshev transform in wall-normal direction
__host__ void cheby_s2p(cudaPitchedPtr tPtr, int hmx, int my, int mz, Padding_mode doPadding) {
// const size_t pitch = tPtr.pitch;
// const int pz = mz / 2 + 1;
const int nx = (hmx-1)*2/3*2;
const int ny = my/3*2;
const int hnx = nx/2+1;
cufftResult res;
cudaError_t err;
float time;
if(doPadding == Padding){
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(start_trans);
#endif
cheby_pre_s2p_pad<<<dim3(hnx,ny), mz/4+1 >>>((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_pre_s2p_pad_time = " << time / 1000.0 << std::endl;
cudaEventRecord(start_trans);
#endif
res = CUFFTEXEC_C2C(planZ_pad, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, CUFFT_FORWARD);
ASSERT(res == CUFFT_SUCCESS);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby fft s2p padding time = " << time / 1000.0 << std::endl;
#endif
//err = cudaDeviceSynchronize();
//ASSERT(err == cudaSuccess);
}
else if(doPadding == No_Padding)
{
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(start_trans);
#endif
cheby_pre_s2p_noPad<<<dim3(hnx,ny), mz/4 >>>((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby_pre_s2p_nopad_time = " << time / 1000.0 << std::endl;
cudaEventRecord(start_trans);
#endif
res = CUFFTEXEC_C2C(planZ_no_pad, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, CUFFT_FORWARD);
ASSERT(res == CUFFT_SUCCESS);
#if (defined CURPCF_CUDA_PROFILING) && (defined SHOW_TRANSFORM_TIME)
cudaEventRecord(end_trans);
cudaEventSynchronize(end_trans);
cudaEventElapsedTime(&time, start_trans, end_trans);
std::cout << "cheby fft s2p no pad time = " << time / 1000.0 << std::endl;
#endif
//err = cudaDeviceSynchronize();
//ASSERT(err == cudaSuccess);
}
else
{
assert(false);
}
}
__host__ void transform_backward_X6(problem& pb) {
int dim[3] = { pb.mx,pb.my,pb.mz };
int tDim[3] = { pb.mz,pb.mx,pb.my };
cheby_s2p_X6(pb.dptr_tu, dim[0] / 2 + 1, dim[1], dim[2]);
//transpose(dir, Ptr, tPtr, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_u, pb.dptr_tu, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_v, pb.dptr_tv, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_w, pb.dptr_tw, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_omega_x, pb.dptr_tomega_x, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_omega_y, pb.dptr_tomega_y, dim, tDim);
cuda_transpose(BACKWARD, pb.dptr_omega_z, pb.dptr_tomega_z, dim, tDim);
cudaPitchedPtr& Ptr = pb.dptr_u;
int nThreadx = 16;
int nThready = 16;
dim3 nThread(nThreadx, nThready);
int nDimx = dim[1] / nThreadx;
int nDimy = (dim[2] / 2 + 1)*6 / nThready;
if (dim[1] % nThreadx != 0) nDimx++;
if ((dim[2] / 2 + 1)*6 % nThready != 0) nDimy++;
dim3 nDim(nDimx, nDimy);
setZerosKernel<<<nDim, nThread >>>((cuRPCF::complex*)Ptr.ptr, Ptr.pitch,
dim[0], dim[1], dim[2]*6);
#ifdef KERNEL_SYNCHRONIZED
cuCheck(cudaDeviceSynchronize(), "set zeros");
#endif
cufftResult_t res;
res = CUFFTEXEC_C2R(planXYc2r_X6, (CUFFTCOMPLEX*)pb.dptr_u.ptr,
(CUFFTREAL*)pb.dptr_u.ptr);
ASSERT(res == CUFFT_SUCCESS);
cuCheck(cudaDeviceSynchronize(),"fft");
}
__host__ void transform_forward_X3(problem& pb) {
cufftResult_t res;
cudaPitchedPtr Ptr = pb.dptr_lamb_x;
res = CUFFTEXEC_R2C(planXYr2c_X3, (CUFFTREAL*)Ptr.ptr,
(CUFFTCOMPLEX*)Ptr.ptr);
int dim[3] = { pb.mx, pb.my, pb.mz };
int tDim[3] = { pb.mz, pb.mx, pb.my };
//normalize;
int nthreadx = 16;
int nthready = 16;
int nDimx = dim[1] / nthreadx;
int nDimy = (dim[2] / 2 + 1) * 3/ nthready;
if (dim[1] % nthreadx != 0) nDimx++;
if ((dim[2] / 2 + 1)*3 % nthready != 0) nDimy++;
dim3 dim_num(nDimx, nDimy);
dim3 thread_num(nthreadx, nthready);
// THIS LAUNCH PARAMETER NEEDS TO BE CHANGED
normalizeKernel<<< dim_num, thread_num >>>
((REAL*)Ptr.ptr, Ptr.pitch, dim[0], dim[1], dim[2]*3, 1.0 / dim[0] / dim[1]);
cuCheck(cudaDeviceSynchronize(),"normalize X3");
cuda_transpose(FORWARD, pb.dptr_lamb_z, pb.dptr_tLamb_z, dim, tDim);
cuda_transpose(FORWARD, pb.dptr_lamb_y, pb.dptr_tLamb_y, dim, tDim);
cuda_transpose(FORWARD, pb.dptr_lamb_x, pb.dptr_tLamb_x, dim, tDim);
cheby_p2s_X3(pb.dptr_tLamb_x, dim[0] / 2 + 1, dim[1], dim[2]);
}
__host__ void cheby_p2s_X3(cudaPitchedPtr tPtr, int hmx, int my, int mz) {
// const size_t pitch = tPtr.pitch;
const int nx = (hmx - 1) * 2 / 3 * 2;
const int ny = my / 3 * 2;
const int hnx = nx / 2 + 1;
int threadDimx = 16;
int threadDimy = 16;
int blockDimx = hnx / threadDimx;
int blockDimy = ny*3 / threadDimy;
if (hnx%threadDimx != 0) blockDimx++;
if (ny*3%threadDimy != 0) blockDimy++;
dim3 nthread(threadDimx, threadDimy);
dim3 nBlock(blockDimx, blockDimy);
cufftResult res;
cudaError_t err;
cheby_pre_p2s <<<nBlock, nthread>>> ((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my*3, mz);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
res = CUFFTEXEC_C2C(planZ_X3, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, CUFFT_FORWARD);
assert(res == CUFFT_SUCCESS);
//err = cudaDeviceSynchronize();
//assert(err == cudaSuccess);
cheby_post_p2s <<<nBlock, nthread >>>((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, my*3, mz);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
}
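/*
 * Note: cheby_p2s_X3 batches three fields into one transform by passing my*3 to
 * the pre/post kernels and using planZ_X3; this assumes the three variables are
 * stored contiguously along the y index so a single batched FFT covers all of
 * them. cheby_s2p_X6 below applies the same trick with a factor of 6.
 */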
__host__ void cheby_s2p_X6(cudaPitchedPtr tPtr, int hmx, int my, int mz) {
// const size_t pitch = tPtr.pitch;
//const int pz = mz / 2 + 1;
const int nx = (hmx - 1) * 2 / 3 * 2;
const int ny = my / 3 * 2;
const int hnx = nx / 2 + 1;
int threadDimx = 16;
int threadDimy = 16;
int blockDimx = hnx / threadDimx;
int blockDimy = 6*ny / threadDimy;
if (hnx%threadDimx != 0) blockDimx++;
if (6*ny%threadDimy != 0) blockDimy++;
dim3 nthread(threadDimx, threadDimy);
dim3 nBlock(blockDimx, blockDimy);
cufftResult res;
cudaError_t err;
cheby_pre_s2p_pad <<<nBlock, nthread >>>((cuRPCF::complex*)tPtr.ptr, tPtr.pitch, hmx, 6*my, mz);
#ifdef KERNEL_SYNCHRONIZED
err = cudaDeviceSynchronize();
assert(err == cudaSuccess);
#endif
res = CUFFTEXEC_C2C(planZ_X6, (CUFFTCOMPLEX*)tPtr.ptr,
(CUFFTCOMPLEX*)tPtr.ptr, CUFFT_FORWARD);
ASSERT(res == CUFFT_SUCCESS);
//err = cudaDeviceSynchronize();
//ASSERT(err == cudaSuccess);
} |
58b75e7f0f6b9201e62eb79b619c58cab09a23af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "cnn.h"
#include "layer.h"
#include "dataset.h"
#include "batch.h"
#include "debug_extra.h"
#include "error_handling.h"
#include "cudaKernels.h"
#include "timer.h"
double LEARNING_RATE = 0.003;
void writeMetrics(float time, float mse, float mcr)
{
char filename[15] = "Metrics";
FILE *fp = fopen(filename, "a");
if (fp == NULL) {
fprintf(stderr, "Can't open file!\n");
return;
}
fprintf(fp, "%2.6f ", time);
fprintf(fp, "%2.6f ", mse);
fprintf(fp, "%2.6f ", mcr);
fprintf(fp, "\n");
fprintf(stderr, "Writing metrics.. Done\n");
fclose(fp);
}
// TODO 1: handle subsampling layer weights and biases
//
int d_feed_forward(cnnlayer_t *headlayer, double *training_data, int *batch_indexes)
{
int batchctr = 0;
for (batchctr = 0; batchctr < BATCH_SIZE; batchctr++)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
int csample = batch_indexes[batchctr];
int inp_vec_size = current->no_of_neurons;
//headlayer's neurons input = output
int input_data_ctr = 0;
for (input_data_ctr = 0; input_data_ctr < inp_vec_size; input_data_ctr++)
{
int first_layer_idx = batchctr * inp_vec_size + input_data_ctr;
current->neurons_input[first_layer_idx] = (training_data + csample * inp_vec_size)[input_data_ctr];
current->neurons_output[first_layer_idx] = (training_data + csample * inp_vec_size)[input_data_ctr];
}
int outIdx = batchctr * inp_vec_size;
int outSize = inp_vec_size * sizeof(real_t);
HANDLE_ERROR(hipMemcpy(¤t->d_neurons_output[outIdx], ¤t->neurons_output[outIdx], outSize, hipMemcpyHostToDevice));
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int src_fmaps = current->no_of_fmaps;
int dst_fmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel;
int bmargin = floor(fkernel/2);
int imh = current->fmap_height;
int imw = current->fmap_width;
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
real_t* d_output = next_to_current->d_neurons_output;
real_t* d_input = current->d_neurons_output;
real_t* d_kernel = current->d_weights;
real_t* d_biases = current->d_biases;
dim3 nBlocks(src_fmaps, dst_fmaps, 1);
dim3 nThreads(imw, imh, 1);
int sh_mem_size = imw * imh * sizeof(real_t) + fkernel * fkernel * sizeof(real_t);
hipLaunchKernelGGL(( convolve_device_2D), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_input, d_kernel, fkernel * fkernel);
hipLaunchKernelGGL(( compute_transfer_function), dim3(dst_fmaps), dim3(next_imw * next_imh) , 0, 0, d_output, d_biases, current->layer_type);
hipDeviceSynchronize();
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
// fully-connected layer
int src_layer_size = current->no_of_neurons;
int dst_layer_size = next_to_current->no_of_neurons;
real_t* d_input = current->d_neurons_output;
real_t* d_output = next_to_current->d_neurons_output;
real_t* d_weights = current->d_weights;
real_t* d_biases = current->d_biases;
dim3 nBlocks(dst_layer_size, 1, 1);
dim3 nThreads(src_layer_size, 1, 1);
int sh_mem_size = (2 * src_layer_size) * sizeof(real_t);
hipLaunchKernelGGL(( d_rear_DNN), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_input, d_weights);
hipLaunchKernelGGL(( compute_transfer_function), dim3(dst_layer_size), dim3(1) , 0, 0, d_output, d_biases, current->layer_type);
hipDeviceSynchronize();
}
else if (next_to_current->subsampling == true)
{
// How to perform average pooling
// Pattern Recognition and Machine Learning, By Christopher M. Bishop (P267)
// ... Each subsampling unit might take inputs from a 2x2 unit region in the
// corresponding feature map and would compute the average of
// those inputs, multiplied by an adaptive weight with the addition of an adaptive bias
// parameter, and then transformed using a sigmoidal non-linear activation function.
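// Sketch (assuming d_subsampling mirrors the host-side pooling in
// h_feed_forward): for each 2x2 block {p1,p2,p3,p4} of a source feature map the
// subsampled unit computes
//     y = f( w * pool(p1, p2, p3, p4) + b )
// where pool() is average / max / stochastic pooling depending on pool_type and
// f is the layer's transfer function.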
real_t* d_input = current->d_neurons_output;
real_t* d_output = next_to_current->d_neurons_output;
int* d_gradientMap = next_to_current->d_gradientMap;
dim3 nBlocks(src_fmaps, 1, 1);
dim3 nThreads(imw, imh, 1);
int sh_mem_size = imw * imh * sizeof(real_t);
hipLaunchKernelGGL(( d_subsampling), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_input, d_gradientMap, current->layer_type);
hipDeviceSynchronize();
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
return 0;
}
static __inline__ long long getticks( void )
{
unsigned a, d;
asm volatile("rdtsc" : "=a" (a), "=d" (d));
return ((long long)a) | (((long long)d) << 32);
}
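// Note: getticks() reads the CPU time-stamp counter (rdtsc), i.e. raw clock
// cycles; the conversion (end_ticks - start_ticks)/3330000 used in train_cnn
// assumes a ~3.33 GHz core, giving a value in milliseconds (an assumption about
// the machine this code was tuned on).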
int h_feed_forward(cnnlayer_t *headlayer, double *training_data, int *batch_indexes)
{
int batchctr = 0;
for (batchctr = 0; batchctr < BATCH_SIZE; batchctr++)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
int csample = batch_indexes[batchctr];
int inp_vec_size = current->no_of_neurons;
//headlayer's neurons input = output
int input_data_ctr = 0;
for (input_data_ctr = 0; input_data_ctr < inp_vec_size; input_data_ctr++)
{
int first_layer_idx = batchctr * inp_vec_size + input_data_ctr;
current->neurons_output[first_layer_idx] = (training_data + csample * inp_vec_size)[input_data_ctr];
current->neurons_input[first_layer_idx] = (training_data + csample * inp_vec_size)[input_data_ctr];
}
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int src_fmaps = current->no_of_fmaps;
int dst_fmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel;
int bmargin = floor(fkernel/2);
int imh = current->fmap_height;
int imw = current->fmap_width;
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
//convolution layers
int dst_layer_size = next_imw * next_imh * dst_fmaps;
int src_fmap_ctr = 0, dst_fmap_ctr = 0;
for (dst_fmap_ctr = 0; dst_fmap_ctr < dst_fmaps; dst_fmap_ctr++)
{
for (src_fmap_ctr = 0; src_fmap_ctr < src_fmaps; src_fmap_ctr++)
{
//weights do not involve batch counter
int weights_stidx = src_fmaps * dst_fmap_ctr * fkernel * fkernel;
int st_idx = weights_stidx + src_fmap_ctr * fkernel * fkernel;
real_t* filter = &(current->weights_matrix[st_idx]);
// Source layer feature maps starting index
int fmap_stidx = 0;
int src_layer_size = imh * imw * src_fmaps;
fmap_stidx = batchctr * src_layer_size + src_fmap_ctr * imh * imw;
// Destination (layer) feature map starting index
int dst_fmap_stidx = batchctr * dst_layer_size + dst_fmap_ctr * next_imh * next_imw;
int dst_fmap_unit_ctr = 0;
int hctr = 0;
int wctr = 0;
for (hctr = 0; hctr < imh; hctr++)
{
for (wctr = 0; wctr < imw; wctr++)
{
if ((hctr >= bmargin && wctr >= bmargin) && (hctr < imh - bmargin && wctr < imw - bmargin))
{
// Apply fitler kernel of size 5x5 to the input
int cidx = fmap_stidx + hctr * imw + wctr;
real_t sum = 0.0;
int filterCtr = 0, convCtr1 = 0, convCtr2 = 0;
for (convCtr1 = -1 * floor(current->fkernel/2); convCtr1 <= floor(current->fkernel/2); convCtr1++)
{
for (convCtr2 = -1 * floor(current->fkernel/2); convCtr2 <= floor(current->fkernel/2); convCtr2++)
{
sum = sum + filter[filterCtr] * current->neurons_output[cidx + convCtr1 * imw + convCtr2];
filterCtr++;
}
}
//save summation to destination feature map
int dst_idx = dst_fmap_stidx + dst_fmap_unit_ctr;
//next_to_current->neurons_input[dst_idx] += current->biases[dst_fmap_ctr];
next_to_current->neurons_input[dst_idx] += sum;
if (src_fmap_ctr == src_fmaps - 1)
{
next_to_current->neurons_input[dst_idx] += current->biases[dst_fmap_ctr];
real_t cn = next_to_current->neurons_input[dst_idx];
next_to_current->neurons_input[dst_idx] = 0;
if (next_to_current->layer_type == 1)
next_to_current->neurons_output[dst_idx] = sigmoid(cn);
else if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dst_idx] = htangent(cn);
}
else if (next_to_current->layer_type == 3)
next_to_current->neurons_output[dst_idx] = reLUSoftPlus(cn);
}
dst_fmap_unit_ctr++;
}
}
}
}
}
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
int src_layer_size = current->no_of_neurons;
int dst_layer_size = next_to_current->no_of_neurons;
int dcounter = 0;
int scounter = 0;
real_t sum = 0.0;
for (dcounter = 0; dcounter < dst_layer_size; dcounter++)
{
sum = 0.0;
for (scounter = 0; scounter < src_layer_size; scounter++)
{
int sidx = batchctr * src_layer_size + scounter;
real_t cweight = current->weights_matrix[dcounter * src_layer_size + scounter];
real_t xdata = current->neurons_output[sidx];
sum += cweight * xdata;
}
int dst_idx = batchctr * dst_layer_size + dcounter;
next_to_current->neurons_input[dst_idx] = sum + current->biases[dcounter];
if (next_to_current->layer_type == 1)
next_to_current->neurons_output[dst_idx] = sigmoid(next_to_current->neurons_input[dst_idx]);
else if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dst_idx] = htangent(next_to_current->neurons_input[dst_idx]);
}
else if (next_to_current->layer_type == 3)
next_to_current->neurons_output[dst_idx] = reLUSoftPlus(next_to_current->neurons_input[dst_idx]);
}
}
else if (next_to_current->subsampling == true)
{
//Pattern Recognition and Machine Learning, By Christopher M. Bishop (P267)
// ... Each subsampling unit might take inputs from a 2x2 unit region in the
// corresponding feature map and would compute the average (here max pooling) of
// those inputs, multiplied by an adaptive weight with the addition of an adaptive bias
// parameter, and then transformed using a sigmoidal non-linear activation function.
// Subsampling goes here ...
int src_layer_size = imw * imh * src_fmaps;
int dst_fmap_size = next_imh * next_imw;
int dst_layer_size = next_imw * next_imh * dst_fmaps;
int src_fmap_ctr = 0;
for (src_fmap_ctr = 0; src_fmap_ctr < src_fmaps; src_fmap_ctr++)
{
int dst_fmap_ctr = src_fmap_ctr;
int fmap_stidx = batchctr * src_layer_size + src_fmap_ctr * imh * imw;
int next_fmap_stidx = batchctr * dst_layer_size + dst_fmap_ctr * dst_fmap_size;
real_t cweight = current->weights_matrix[src_fmap_ctr];
int wctr, hctr;
for (hctr = 0; hctr < imh; hctr += 2)
{
for (wctr = 0; wctr < imw; wctr += 2)
{
int cidx = fmap_stidx + hctr * imw + wctr;
real_t p01, p02, p03, p04;
p01 = current->neurons_output[cidx];
p02 = current->neurons_output[cidx + 1];
p03 = current->neurons_output[cidx + imw];
p04 = current->neurons_output[cidx + imw + 1];
int dhctr = hctr/2;
int dwctr = wctr/2;
int dst_pos = next_fmap_stidx + dhctr * next_imw + dwctr;
real_t spooling_result = 0; int poolingIdx1 = -1;
real_t pooled = 0;
if (next_to_current->pool_type == 1)
{
// average pooling
pooled = (p01 + p02 + p03 + p04)/4;
next_to_current->neurons_input[dst_pos] = current->biases[dst_fmap_ctr];
next_to_current->neurons_input[dst_pos] += cweight * pooled;
}
else if (next_to_current->pool_type == 2)
{
// max pooling
int idx = 0;
pooled = pool_max(p01, p02, p03, p04, &idx, 4);
if (idx == 0) next_to_current->gradientMap[dst_pos] = cidx;
else if (idx == 1) next_to_current->gradientMap[dst_pos] = cidx + 1;
else if (idx == 2) next_to_current->gradientMap[dst_pos] = cidx + imw;
else if (idx == 3) next_to_current->gradientMap[dst_pos] = cidx + imw + 1;
next_to_current->neurons_input[dst_pos] = pooled;
}
else if (next_to_current->pool_type == 3)
{
spooling_result = stochastic_pooling(p01, p02, p03, p04, &poolingIdx1);
pooled = spooling_result;
if (poolingIdx1 == 0)
next_to_current->gradientMap[dst_pos] = cidx;
else if (poolingIdx1 == 1)
next_to_current->gradientMap[dst_pos] = cidx + 1;
else if (poolingIdx1 == 2)
next_to_current->gradientMap[dst_pos] = cidx + imw;
else if (poolingIdx1 == 3)
next_to_current->gradientMap[dst_pos] = cidx + imw + 1;
next_to_current->neurons_input[dst_pos] = pooled;
}
if (next_to_current->layer_type == 1)
{
next_to_current->neurons_output[dst_pos] = sigmoid(next_to_current->neurons_input[dst_pos]);
}
else if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dst_pos] = htangent(next_to_current->neurons_input[dst_pos]);
}
else if (next_to_current->layer_type == 3)
{
next_to_current->neurons_output[dst_pos] = reLUSoftPlus(next_to_current->neurons_input[dst_pos]);
}
}
}
}
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
return 0;
}
real_t sigmoid(real_t x)
{
real_t y = 0.0;
y = 1.0/(1 + exp(-x));
return y;
}
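// Note: with y = sigmoid(x), the derivative needed for backpropagation is
// dy/dx = y * (1 - y), which is the expression used for layer_type == 1 in
// h_compute_gradients_deltas (dy_output = neurons_output * (1 - neurons_output)).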
real_t htangent(real_t x)
{
return 1.7159 * tanh(0.66666 * x);
}
real_t reLUSoftPlus(real_t x)
{
if (x < 0)
return 0;
else
return x;
}
real_t dreLUSoftPlus(real_t x)
{
if (x < 0)
return 0;
else
return 1;
}
real_t dhtangent(real_t y)
{
real_t A = 1.7159;
real_t S = 2.0/3.0;
real_t d = A * S * (1 - y/A) * (1 + y/A);
return d;
}
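// Derivation sketch: for y = A * tanh(S * x) with A = 1.7159 and S = 2/3,
//     dy/dx = A * S * (1 - tanh^2(S*x)) = A * S * (1 - (y/A)^2)
//           = A * S * (1 - y/A) * (1 + y/A),
// which is exactly the form returned by dhtangent(y) above.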
real_t computeEntropy(real_t a11, real_t a12, real_t a13, real_t a14)
{
real_t sum = a11 + a12 + a13 + a14;
real_t e11 = 0, e12 = 0, e13 = 0, e14 = 0;
if (a11 != 0)
e11 = a11/sum * (real_t) log2((real_t)(a11/sum));
if (a12 != 0)
e12 = a12/sum * log2((real_t)(a12/sum));
if (a13 != 0)
e13 = a13/sum * log2((real_t)(a13/sum));
if (a14 != 0)
e14 = a14/sum * log2((real_t)(a14/sum));
real_t entropy = e11 + e12 + e13 + e14;
return entropy;
}
real_t stochastic_pooling(real_t a01, real_t a02, real_t a03, real_t a04, int* poolingIdx)
{
real_t sum = exp(a01) + exp(a02) + exp(a03) + exp(a04);
real_t p01 = exp(a01)/sum;
real_t p02 = exp(a02)/sum;
real_t p03 = exp(a03)/sum;
real_t p04 = exp(a04)/sum;
//cumulative distribution function (CDF)
real_t cdf[4] = {0, 0, 0, 0};
cdf[0] = p01;
cdf[1] = cdf[0] + p02;
cdf[2] = cdf[1] + p03;
cdf[3] = cdf[2] + p04;
real_t randSample = (real_t) rand() / (real_t) RAND_MAX;
if (randSample <= cdf[0])
{
*poolingIdx = 0;
return a01;
}
else if (randSample <= cdf[1])
{
*poolingIdx = 1;
return a02;
}
else if (randSample <= cdf[2])
{
*poolingIdx = 2;
return a03;
}
else
{
*poolingIdx = 3;
return a04;
}
}
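/*
 * Worked example: for activations {1, 2, 3, 4} the softmax weights are roughly
 * {0.032, 0.087, 0.237, 0.644}, giving the CDF {0.032, 0.119, 0.356, 1.0}; a
 * uniform sample of e.g. 0.30 then selects index 2 (value 3). Larger
 * activations are therefore pooled with higher probability, while smaller ones
 * still have a nonzero chance of being picked.
 */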
real_t compute_mse(struct nnlayer* headlayer, int nouts, int* batch_indexes, unsigned char* lables)
{
struct nnlayer* current = headlayer;
struct nnlayer* lastlayer = NULL;
while (current != NULL)
{
if (current->next == NULL)
lastlayer = current;
current = current->next;
}
current = lastlayer;
int* desired_output = (int *) malloc(nouts * sizeof(int));
real_t mse = 0, avg_mse = 0;
int counter = 0;
for (counter = 0; counter < BATCH_SIZE; counter++)
{
// layer_type = 1 -> sigmoid, 2 -> tanh, 3 -> relu
if (current->layer_type == 1 || current->layer_type == 3 || current->layer_type == 0)
{
int doCtr = 0;
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_output[doCtr] = 0;
}
else if (current->layer_type == 2)
{
int doCtr = 0;
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_output[doCtr] = -1;
}
unsigned char cl = lables[batch_indexes[counter]];
desired_output[cl] = 1;
mse = 0.0;
int nctr = 0;
for (nctr = 0; nctr < nouts; nctr++)
{
real_t error = desired_output[nctr] - current->neurons_output[counter * nouts + nctr];
mse = mse + (error * error);
}
mse = mse/nouts;
avg_mse = avg_mse + mse;
}
free(desired_output);
return avg_mse/BATCH_SIZE;
}
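// Note: per sample this computes
//     mse = (1/nouts) * sum_k (desired_k - output_k)^2
// with desired_k = 1 for the true class and 0 (or -1 for tanh output layers)
// otherwise; the returned value is that mse averaged over the BATCH_SIZE samples.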
void train_cnn(cnnlayer_t* headlayer, dataset_t* train_samples, dataset_t* test_samples)
{
int epoch_counter = 0;
int max_epoch = 50;
int *batch_indexes = (int *) malloc(sizeof(int) * train_samples->numVectors);
real_t min_mcr = 25.0;
bool_t gpu_turn = 1;
copy_hweights_to_dweights(headlayer);
real_t mcr_test_set = 0;
mcr_test_set = d_compute_missclassification_rate(headlayer, test_samples);
printf("\n ===== BEFORE TRAINING ====");
printf("\n EpochCounter\t\tTEST SET");
printf("\n\n%6d\t\t\t%4.3f", epoch_counter, mcr_test_set);
printf("\n");
while (epoch_counter < max_epoch)
{
int nMcr = 1;
int nvecs = train_samples->numVectors;
int batch_count = nvecs/BATCH_SIZE;
mini_batching(batch_indexes, nvecs, true);
real_t avg_mse = 0;
int nouts = train_samples->lenlable;
long double time = 0.0;
real_t mcr = 0.0;
if (gpu_turn != 0)
{
GpuTimer timer;
long double elapsed = 0.0;
int bctr = 0; // counter in a batch
for (bctr = 0; bctr < batch_count; bctr++)
{
timer.Start();
d_feed_forward(headlayer, train_samples->data, &batch_indexes[bctr * BATCH_SIZE]);
d_compute_gradients_deltas(headlayer, nouts, train_samples->lables, &batch_indexes[bctr * BATCH_SIZE]);
d_update_weights(headlayer);
d_reset_vectors(headlayer);
timer.Stop();
float time_citer = timer.Elapsed();
elapsed += time_citer;
float mse = compute_mse(headlayer, nouts, &batch_indexes[bctr * BATCH_SIZE], train_samples->lables);
avg_mse += mse;
if (bctr % 1000 == 999) {
fprintf(stderr,"\nbctr/batch_count: %d/%d epoch_counter/max_epoch: %d/%d",
bctr + 1, batch_count,
epoch_counter + 1, max_epoch);
}
}
time = elapsed;
fprintf(stderr, "\n elapsed_time: %Lf", elapsed);
}
else
{
fprintf(stderr, "\n Feed Forward via CPU");
long double elapsed = 0.0;
int bctr = 0;
for (bctr = 0; bctr < batch_count; bctr++)
{
long start_ticks = getticks();
h_feed_forward(headlayer, train_samples->data, &batch_indexes[bctr * BATCH_SIZE]);
h_compute_gradients_deltas(headlayer, nouts, train_samples->lables, &batch_indexes[bctr * BATCH_SIZE]);
h_update_weights(headlayer);
reset_inputs_dweights_deltas(headlayer);
long end_ticks = getticks();
float time_citer = (float)(end_ticks - start_ticks)/3330000;
elapsed += time_citer;
float mse = compute_mse(headlayer, nouts, &batch_indexes[bctr * BATCH_SIZE], train_samples->lables);
avg_mse += mse;
if (bctr % 1000 == 999)
fprintf(stderr,"\nbctr/batch_count: %d/%d max_epoch: %d MSE: %.5f", bctr + 1, batch_count, max_epoch, mse);
}
time = elapsed;
fprintf(stderr, "\n elapsed_time: %Lf", elapsed);
}
avg_mse = avg_mse / batch_count;
printf("\n Avg MSE: %f, epoch: %d", avg_mse, epoch_counter);
if (gpu_turn != 0 && epoch_counter % nMcr == 0)
{
copy_dweights_to_hweights(headlayer);
//display_weights_matrices(headlayer);
real_t mcr_test_set = 0;
mcr_test_set = d_compute_missclassification_rate(headlayer, test_samples);
printf("\n =========================");
printf("\n EpochCounter\t\tTEST SET");
printf("\n\n%6d\t\t\t%4.3f", epoch_counter + 1, mcr_test_set);
printf("\n");
mcr = mcr_test_set;
d_reset_output_vectors(headlayer);
if (mcr_test_set < min_mcr)
{
fprintf(stderr,"Writing weight..");
char fn[4];
char fname[13] = "Checkpoint-";
sprintf (fn, "%d", epoch_counter);
strcat(fname, fn);
save_trained_network_weights(headlayer, fname);
min_mcr = mcr_test_set;
fprintf(stderr,"\t\tWriting weights done!\n\n");
}
}
else if (gpu_turn == 0 && epoch_counter % nMcr == 0)
{
//display_weights_matrices(headlayer);
real_t mcr_test_set = 0;
mcr_test_set = h_compute_missclassification_rate(headlayer, test_samples);
printf("\n =========================");
printf("\n EpochCounter TEST SET");
printf("\n\n %d %f ", epoch_counter + 1, mcr_test_set);
printf("\n");
mcr = mcr_test_set;
reset_inputs_dweights_deltas(headlayer);
if (mcr_test_set < min_mcr)
{
char fn[4];
char fname[13] = "Checkpoint-";
sprintf (fn, "%d", epoch_counter);
strcat(fname, fn);
save_trained_network_weights(headlayer, fname);
min_mcr = mcr_test_set;
}
}
writeMetrics(time, avg_mse, mcr);
if (epoch_counter % 5 == 0)
LEARNING_RATE = LEARNING_RATE * 0.93;
epoch_counter++;
}
fprintf(stderr,"\n");
free(batch_indexes);
}
void d_compute_gradients_deltas(cnnlayer_t *headlayer, int nouts, unsigned char* desired_output, int* batch_indexes)
{
int *desired_vec = (int *) malloc(sizeof(int) * nouts);
int batchctr = 0;
for (batchctr = 0; batchctr < BATCH_SIZE; batchctr++)
{
cnnlayer_t* current = headlayer->next;
cnnlayer_t* lastlayer = NULL;
/* Reaching the last layer and propagating error gradients backwards */
while (current != NULL)
{
if (current->next == NULL)
{
lastlayer = current;
break;
}
current = current->next;
}
current = lastlayer;
int num_neurons = current->no_of_neurons, doCtr = 0;
if (current->layer_type == 1 || current->layer_type == 3)
{
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_vec[doCtr] = 0;
}
else if (current->layer_type == 2)
{
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_vec[doCtr] = -1;
}
int d_idx = desired_output[batch_indexes[batchctr]];
desired_vec[d_idx] = 1;
int outSize = current->no_of_neurons * BATCH_SIZE * sizeof(real_t);
HANDLE_ERROR(hipMemcpy(current->neurons_output, current->d_neurons_output, outSize, hipMemcpyDeviceToHost));
int ectr = 0;
for (ectr = 0; ectr < num_neurons; ectr++)
{
int b_idx = batchctr * num_neurons + ectr;
current->error_deltas[b_idx] = (current->neurons_output[b_idx] - desired_vec[ectr]);
//printf("\n Output: %f", current->neurons_output[b_idx]);
}
// Copy error_deltas to GPU global memory
HANDLE_ERROR(hipMemcpy(current->d_error_deltas, current->error_deltas, outSize, hipMemcpyHostToDevice));
current = lastlayer->previous;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (current->previous == NULL)
flag = false;
//back propagate the error deltas from here
int curr_height = current->fmap_height;
int curr_width = current->fmap_width;
int curr_fmap_size = curr_height * curr_width;
int prev_height = lastlayer->fmap_height;
int prev_width = lastlayer->fmap_width;
int prev_fmap_size = prev_height * prev_width;
if (current->fkernel == 1 && lastlayer->subsampling == false)
{
real_t* d_output = current->d_neurons_output;
real_t* d_lerr_deltas = lastlayer->d_error_deltas;
real_t* d_cerr_deltas = current->d_error_deltas;
real_t* d_weights = current->d_weights;
real_t* d_delta_weights = current->d_delta_weights;
real_t* d_delta_biases = current->d_delta_biases;
int nBlocks = lastlayer->no_of_neurons;
int nThreads = current->no_of_neurons;
int sh_mem_size = (2 * current->no_of_neurons + 1) * sizeof(real_t);
hipLaunchKernelGGL(( d_rear_DNN_errorbp), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_lerr_deltas, d_cerr_deltas, d_weights, d_delta_weights, d_delta_biases);
sh_mem_size = (current->no_of_neurons) * sizeof(real_t);
hipLaunchKernelGGL(( d_rear_DNN_update_error_deltas), dim3(1), dim3(nThreads), sh_mem_size, 0, d_output, d_cerr_deltas, current->layer_type);
hipDeviceSynchronize();
/* For Debugging purpose only */
//int wSize = current->no_of_neurons * lastlayer->no_of_neurons * sizeof(real_t);
//HANDLE_ERROR(hipMemcpy(current->delta_weights, d_delta_weights, wSize, hipMemcpyDeviceToHost));
//int nerrSize = current->no_of_neurons * sizeof(real_t);
//HANDLE_ERROR(hipMemcpy(current->error_deltas, d_cerr_deltas, nerrSize, hipMemcpyDeviceToHost));
}
else if (current->fkernel != 1 && lastlayer->subsampling == false)
{
real_t* d_output = current->d_neurons_output;
real_t* d_lerr_deltas = lastlayer->d_error_deltas;
real_t* d_cerr_deltas = current->d_error_deltas;
real_t* d_weights = current->d_weights;
real_t* d_delta_weights = current->d_delta_weights;
real_t* d_delta_biases = current->d_delta_biases;
int kerSize = current->fkernel * current->fkernel;
dim3 nBlocks(current->no_of_fmaps, lastlayer->no_of_fmaps, 1);
dim3 nThreads(current->fmap_width, current->fmap_height, 1);
int sh_mem_size = (prev_fmap_size + curr_fmap_size + kerSize + 1) * sizeof(real_t);
hipLaunchKernelGGL(( errorbp_convolution_layers2), dim3(nBlocks), dim3(nThreads), sh_mem_size , 0, d_output, d_lerr_deltas, d_cerr_deltas, d_weights, d_delta_weights, kerSize);
nBlocks.x = lastlayer->no_of_fmaps; nBlocks.y = 1; nBlocks.z = 1;
nThreads.x = lastlayer->fmap_width; nThreads.y = lastlayer->fmap_height; nThreads.z = 1;
hipLaunchKernelGGL(( errorbp_convolution_update_biases), dim3(nBlocks), dim3(nThreads), 0, 0, d_lerr_deltas, d_delta_biases);
nBlocks.x = current->no_of_fmaps;
nBlocks.y = nBlocks.z = 1;
nThreads.x = current->fmap_width * current->fmap_height;
nThreads.y = nThreads.z = 1;
sh_mem_size = current->fmap_width * current->fmap_height * sizeof(real_t);
hipLaunchKernelGGL(( d_update_error_deltas), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_cerr_deltas, current->layer_type);
//int wSize = (current->no_of_fmaps * lastlayer->no_of_fmaps * kerSize) * sizeof(real_t);
//HANDLE_ERROR(hipMemcpy(current->delta_weights, d_delta_weights, wSize, hipMemcpyDeviceToHost));
//int nerrSize = current->no_of_neurons * sizeof(real_t);
//HANDLE_ERROR(hipMemcpy(current->error_deltas, d_cerr_deltas, nerrSize, hipMemcpyDeviceToHost));
}
else if (lastlayer->subsampling == true)
{
real_t* d_output = current->d_neurons_output;
real_t* d_lerr_deltas = lastlayer->d_error_deltas;
real_t* d_cerr_deltas = current->d_error_deltas;
int* d_gradientMap = lastlayer->d_gradientMap;
dim3 nBlocks(current->no_of_fmaps, 1, 1);
dim3 nThreads(prev_width, prev_height, 1);
int layer_type = current->layer_type;
int sh_mem_size = (2 * prev_width * prev_height) * sizeof(real_t);
hipLaunchKernelGGL(( d_errbp_subsampling), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_lerr_deltas, d_cerr_deltas, d_gradientMap, layer_type);
hipDeviceSynchronize();
//int nerrSize = current->no_of_neurons * sizeof(real_t);
//HANDLE_ERROR(hipMemcpy(current->error_deltas, d_cerr_deltas, nerrSize, hipMemcpyDeviceToHost));
}
if (flag == true)
{
lastlayer = current;
current = current->previous;
}
}
}
free(desired_vec);
}
void h_compute_gradients_deltas(cnnlayer_t *headlayer, int nouts, unsigned char* desired_output, int* batch_indexes)
{
int *desired_vec = (int *) malloc(sizeof(int) * nouts);
int batchctr = 0;
for (batchctr = 0; batchctr < BATCH_SIZE; batchctr++)
{
cnnlayer_t* current = headlayer->next; //skipped input layer
cnnlayer_t* lastlayer = NULL;
while (current != NULL)
{
int num_neurons = current->no_of_neurons;
int nctr = 0;
for (nctr = 0; nctr < num_neurons; nctr++)
{
int idx = batchctr * num_neurons + nctr;
if (current->layer_type == 1)
current->dy_output[idx] = current->neurons_output[idx] * (1 - current->neurons_output[idx]);
else if (current->layer_type == 2)
current->dy_output[idx] = dhtangent(current->neurons_output[idx]);
else if (current->layer_type == 3)
current->dy_output[idx] = dreLUSoftPlus(current->neurons_output[idx]);
}
if (current->next == NULL)
{
lastlayer = current;
break;
}
current = current->next;
}
current = lastlayer;
// compute error for last layer
int num_neurons = current->no_of_neurons;
if (current->layer_type == 1 || current->layer_type == 3)
{
int doCtr = 0;
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_vec[doCtr] = 0;
}
else if (current->layer_type == 2)
{
int doCtr = 0;
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_vec[doCtr] = -1;
}
int d_idx = desired_output[batch_indexes[batchctr]];
desired_vec[d_idx] = 1;
int ectr = 0;
for (ectr = 0; ectr < num_neurons; ectr++)
{
int b_idx = batchctr * num_neurons + ectr;
current->error_deltas[b_idx] = (current->neurons_output[b_idx] - desired_vec[ectr]);
//printf("\n Output: %f", current->neurons_output[b_idx]);
}
current = lastlayer->previous;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (current->previous == NULL)
flag = false;
//back propagate the error deltas from here
int curr_no_fmaps = current->no_of_fmaps;
int curr_height = current->fmap_height;
int curr_width = current->fmap_width;
int curr_fmap_size = curr_height * curr_width;
int prev_no_fmaps = lastlayer->no_of_fmaps;
int prev_height = lastlayer->fmap_height;
int prev_width = lastlayer->fmap_width;
int prev_fmap_size = prev_height * prev_width;
int no_neurons_prev = prev_fmap_size * prev_no_fmaps;
int no_neurons_curr = curr_fmap_size * curr_no_fmaps;
if (current->fkernel == 1 && lastlayer->subsampling == false)
{
int playerUnitIdx = 0, clayerUnitIdx = 0;
for (clayerUnitIdx = 0; clayerUnitIdx < no_neurons_curr; clayerUnitIdx++)
{
real_t sum = 0.0;
int cUnitIdx = batchctr * no_neurons_curr + clayerUnitIdx;
for (playerUnitIdx = 0; playerUnitIdx < no_neurons_prev; playerUnitIdx++)
{
//for each of the neuron,we have dy_output
int pUnitIdx = batchctr * no_neurons_prev + playerUnitIdx;
int wIdx = playerUnitIdx * no_neurons_curr + clayerUnitIdx;
current->delta_weights[wIdx] += lastlayer->error_deltas[pUnitIdx] * current->neurons_output[cUnitIdx];
sum += lastlayer->error_deltas[pUnitIdx] * current->weights_matrix[wIdx];
}
current->error_deltas[cUnitIdx] = current->dy_output[cUnitIdx] * sum;
//printf("\n dy:%f, sum: %f", current->dy_output[cUnitIdx], sum);
}
for (playerUnitIdx = 0; playerUnitIdx < no_neurons_prev; playerUnitIdx++)
{
current->delta_biases[playerUnitIdx] += lastlayer->error_deltas[playerUnitIdx] * 1.0;
}
}
else if (current->fkernel != 1 && lastlayer->subsampling == false)
{
//convolutional layer with kernel of 5x5
int indexes[25];
int pfmapctr, cfmapctr;
for (cfmapctr = 0; cfmapctr < curr_no_fmaps; cfmapctr++)
{
int curr_fmap_stidx = batchctr * no_neurons_curr + cfmapctr * curr_fmap_size;
int iwstidx = cfmapctr * current->fkernel * current->fkernel;
for (pfmapctr = 0; pfmapctr < prev_no_fmaps; pfmapctr++)
{
int prev_fmap_stidx = batchctr * no_neurons_prev + pfmapctr * prev_fmap_size;
int fwstidx = iwstidx + pfmapctr * (curr_no_fmaps * current->fkernel * current->fkernel);
int i;
for (i = 0; i < prev_fmap_size; i++)
{
int tx, ty;
tx = i % lastlayer->fmap_width;
ty = i / lastlayer->fmap_width;
int bmargin = floor(current->fkernel/2);
int stx, sty;
stx = tx + bmargin;
sty = ty + bmargin;
//in the source fmap
int center = sty * current->fmap_width + stx;
int filterCtr = 0, convCtr1 = 0, convCtr2 = 0;
for (convCtr1 = -1 * floor(current->fkernel/2); convCtr1 <= floor(current->fkernel/2); convCtr1++)
{
for (convCtr2 = -1 * floor(current->fkernel/2); convCtr2 <= floor(current->fkernel/2); convCtr2++)
{
indexes[filterCtr] = center + convCtr1 * current->fmap_width + convCtr2;
filterCtr++;
}
}
int player_idx = prev_fmap_stidx + i;
int iter = 0;
for (iter = 0; iter < current->fkernel * current->fkernel; iter++)
{
int clayer_idx = curr_fmap_stidx + indexes[iter];
int weights_idx = fwstidx + iter;
current->delta_weights[weights_idx] += lastlayer->error_deltas[player_idx] * current->neurons_output[clayer_idx];
current->error_deltas[clayer_idx] += (current->weights_matrix[weights_idx] * lastlayer->error_deltas[player_idx] * current->dy_output[clayer_idx]);
}
if (cfmapctr == 0)
current->delta_biases[pfmapctr] += lastlayer->error_deltas[player_idx] * 1.0;
}
}
}
}
else if (lastlayer->subsampling == true)
{
int sindexes[4];
int pfmapCtr = 0;
for (pfmapCtr = 0; pfmapCtr < prev_no_fmaps; pfmapCtr++)
{
int pstidx = batchctr * no_neurons_prev + pfmapCtr * prev_fmap_size; //0, 25
int cfmapCtr = pfmapCtr;
int cstidx = batchctr * no_neurons_curr + cfmapCtr * curr_fmap_size;
int pfmUnitctr = 0;
for (pfmUnitctr = 0; pfmUnitctr < prev_fmap_size; pfmUnitctr++)
{
int player_idx = pstidx + pfmUnitctr;
int px = pfmUnitctr % lastlayer->fmap_width;
int py = pfmUnitctr / lastlayer->fmap_height;
int sx = px * 2;
int sy = py * 2;
int clUnitIdx = sy * current->fmap_width + sx;
sindexes[0] = cstidx + clUnitIdx;
sindexes[1] = cstidx + clUnitIdx + 1;
sindexes[2] = cstidx + clUnitIdx + curr_width;
sindexes[3] = cstidx + clUnitIdx + curr_width + 1;
if (current->pool_type == 1)
{
int j = 0;
for (j = 0; j < 4; j++)
{
current->delta_weights[cfmapCtr] += lastlayer->error_deltas[player_idx] * current->neurons_output[sindexes[j]];
current->error_deltas[sindexes[j]] = (current->weights_matrix[cfmapCtr] * lastlayer->error_deltas[player_idx]) * current->dy_output[sindexes[j]];
}
current->delta_biases[cfmapCtr] += lastlayer->error_deltas[player_idx] * 1.0;
}
else if (current->pool_type == 2)
{
int gradientIdx = lastlayer->gradientMap[player_idx];
//curent->delta_weights[cfmpCt]+=lastlayer->error_deltas[player_idx]*current->neurons_output[gradientIdx];
current->error_deltas[gradientIdx] = lastlayer->error_deltas[player_idx] * current->dy_output[gradientIdx];
}
else if (current->pool_type == 3)
{
int gradientIdx = lastlayer->gradientMap[player_idx];
current->error_deltas[gradientIdx] = lastlayer->error_deltas[player_idx] * current->dy_output[gradientIdx];
//current->delta_biases[cfmapCtr] += lastlayer->error_deltas[player_idx] * 1.0;
}
}
}
}
if (flag == true)
{
lastlayer = current;
current = current->previous;
}
}
}
free(desired_vec);
}
//accumulate weight deltas
void average_deltas(struct nnlayer* headlayer)
{
if (BATCH_SIZE > 1)
{
struct nnlayer* current = headlayer;
struct nnlayer* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
int no_dweights = current->no_of_fmaps * next_to_current->no_of_fmaps * current->fkernel * current->fkernel;
int cctr = 0;
for (cctr = 0; cctr < no_dweights; cctr++)
{
current->delta_weights[cctr] = current->delta_weights[cctr]/BATCH_SIZE;
}
//biases
for (cctr = 0; cctr < next_to_current->no_of_fmaps; cctr++)
{
current->delta_biases[cctr] = current->delta_biases[cctr]/BATCH_SIZE;
}
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
int curr_count = current->no_of_neurons;
int next_curr_count = next_to_current->no_of_neurons;
int cctr = 0;
int ncctr = 0;
for (cctr = 0; cctr < curr_count; cctr++)
{
for (ncctr = 0; ncctr < next_curr_count; ncctr++)
{
int idx = cctr * next_curr_count + ncctr;
current->delta_weights[idx] = current->delta_weights[idx]/BATCH_SIZE;
}
}
//biases
for (cctr = 0; cctr < next_to_current->no_of_fmaps; cctr++)
{
current->delta_biases[cctr] = current->delta_biases[cctr]/BATCH_SIZE;
}
}
else if (next_to_current->subsampling == true)
{
// Subsampling layer
int count = current->no_of_fmaps;
int counter = 0;
for (counter = 0; counter < count; counter++)
{
current->delta_weights[counter] = current->delta_weights[counter]/BATCH_SIZE;
}
//biases
int cctr = 0;
for (cctr = 0; cctr < next_to_current->no_of_fmaps; cctr++)
{
current->delta_biases[cctr] = current->delta_biases[cctr]/BATCH_SIZE;
}
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
}
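// Note: dividing the accumulated delta_weights / delta_biases by BATCH_SIZE
// turns the summed per-sample gradients into mean gradients, so a subsequent
// weight update behaves like standard mini-batch SGD independent of the batch
// size.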
void h_update_weights(struct nnlayer* headlayer)
{
struct nnlayer* current = headlayer;
struct nnlayer* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int ndweights = 0;
if (next_to_current->subsampling == false)
ndweights = current->no_of_fmaps * next_to_current->no_of_fmaps * current->fkernel * current->fkernel;
else
ndweights = current->no_of_fmaps;
int counter = 0;
for (counter = 0; counter < ndweights; counter++)
{
current->weights_matrix[counter] -= LEARNING_RATE * current->delta_weights[counter];
}
for (counter = 0; counter < next_to_current->no_of_fmaps; counter++)
{
current->biases[counter] -= LEARNING_RATE * current->delta_biases[counter];
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
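// Note: the loop above is plain gradient descent,
//     w <- w - LEARNING_RATE * dW,    b <- b - LEARNING_RATE * dB,
// where LEARNING_RATE itself is decayed by a factor of 0.93 every 5 epochs in
// train_cnn.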
void d_update_weights(struct nnlayer* headlayer)
{
struct nnlayer* current = headlayer;
struct nnlayer* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
real_t* d_weights = current->d_weights;
real_t* d_delta_weights = current->d_delta_weights;
real_t* d_biases = current->d_biases;
real_t* d_delta_biases = current->d_delta_biases;
if (next_to_current->subsampling == true)
{
int nBlocks = current->no_of_fmaps;
int nThreads = 1;
int sh_mem_size = sizeof(real_t);
hipLaunchKernelGGL(( d_update_weights_kernel), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_weights, d_delta_weights, LEARNING_RATE);
int nwSize = current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(hipMemcpy(current->weights_matrix, current->d_weights, nwSize, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( d_update_biases_kernel), dim3(next_to_current->no_of_fmaps), dim3(1) , 0, 0, d_biases, d_delta_biases, LEARNING_RATE);
HANDLE_ERROR(hipMemcpy(current->biases, current->d_biases, nwSize, hipMemcpyDeviceToHost));
}
else
{
dim3 nBlocks(current->no_of_fmaps, next_to_current->no_of_fmaps, 1);
dim3 nThreads(current->fkernel, current->fkernel, 1);
int sh_mem_size = 2 * current->fkernel * current->fkernel * sizeof(real_t);
hipLaunchKernelGGL(( d_update_weights_kernel), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_weights, d_delta_weights, LEARNING_RATE);
int nwSize = current->no_of_fmaps * next_to_current->no_of_fmaps * current->fkernel * current->fkernel * sizeof(real_t);
HANDLE_ERROR(hipMemcpy(current->weights_matrix, current->d_weights, nwSize, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( d_update_biases_kernel), dim3(next_to_current->no_of_fmaps), dim3(1) , 0, 0, d_biases, d_delta_biases, LEARNING_RATE);
int nbSize = next_to_current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(hipMemcpy(current->biases, current->d_biases, nbSize, hipMemcpyDeviceToHost));
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
void hd_reset_biases(cnnlayer_t* headlayer)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int nbSize = next_to_current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(hipMemset(current->d_biases, 0, nbSize));
memset(current->biases, 0, nbSize);
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
void d_reset_vectors(cnnlayer_t* headlayer)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int fk = current->fkernel;
if (next_to_current->subsampling == true)
{
int nwSize = current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(hipMemset(current->d_delta_weights, 0, nwSize));
int nbSize = next_to_current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(hipMemset(current->d_delta_biases, 0, nbSize));
}
else
{
int nwSize = current->no_of_fmaps * next_to_current->no_of_fmaps * fk * fk * sizeof(real_t);
HANDLE_ERROR(hipMemset(current->d_delta_weights, 0, nwSize));
int nbSize = next_to_current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(hipMemset(current->d_delta_biases, 0, nbSize));
}
int noSize = current->no_of_fmaps * current->fmap_width * current->fmap_height * sizeof(real_t);
HANDLE_ERROR(hipMemset(current->d_neurons_output, 0, noSize));
HANDLE_ERROR(hipMemset(current->d_error_deltas, 0, noSize));
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
else
{
//this is the very last layer
int noSize = next_to_current->no_of_neurons * sizeof(real_t);
HANDLE_ERROR(hipMemset(next_to_current->d_neurons_output, 0, noSize));
HANDLE_ERROR(hipMemset(next_to_current->d_error_deltas, 0, noSize));
}
}
}
void d_reset_output_vectors(cnnlayer_t* headlayer)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int noSize = current->no_of_neurons * sizeof(real_t);
HANDLE_ERROR(hipMemset(current->d_neurons_output, 0, noSize));
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
else
{
int noSize = next_to_current->no_of_neurons * sizeof(real_t);
HANDLE_ERROR(hipMemset(next_to_current->d_neurons_output, 0, noSize));
}
}
}
int sign(real_t n1, real_t n2)
{
if (n1 * n2 > 0)
return 1;
else if (n1 * n2 < 0)
return -1;
else
return 0;
}
void reset_inputs_dweights_deltas(cnnlayer_t* headlayer)
{
//printf("\nreset_dweight_deltas");
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int src_nfmaps = current->no_of_fmaps;
int dst_nfmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel;
int weight_matrix_size = 0, biases_size = 0;
if (next_to_current->subsampling == true)
{
weight_matrix_size = src_nfmaps;
biases_size = dst_nfmaps;
}
else
{
weight_matrix_size = src_nfmaps * dst_nfmaps * fkernel * fkernel;
biases_size = dst_nfmaps;
}
int counter = 0;
for (counter = 0; counter < weight_matrix_size; counter++)
current->delta_weights[counter] = 0.0;
for (counter = 0; counter < biases_size; counter++)
current->delta_biases[counter] = 0.0;
//reset error deltas and neurons_input fields
int reset_size = current->no_of_neurons * BATCH_SIZE;
for (counter = 0; counter < reset_size; counter++)
{
current->error_deltas[counter] = 0.0;
current->neurons_input[counter] = 0.0;
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
// the last layer next_to_current does not collect +=,
// neither neurons_input[] nor error_deltas, but GPU version needs
// it to be cleared
}
}
real_t h_compute_missclassification_rate(cnnlayer_t *headlayer, dataset_t *samples)
{
fprintf(stderr, "\n computing MCR, No. of samples: %d\n Progress: ", samples->numVectors);
int mcr = 0;
int datactr = 0;
for (datactr = 0; datactr < samples->numVectors; datactr++)
{
if (datactr % 1000 == 0)
fprintf(stderr, ".");
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL)
{
int resetctr = 0;
for (resetctr = 0; resetctr < current->no_of_neurons; resetctr++)
{
current->neurons_input[resetctr] = 0.0;
current->neurons_output[resetctr] = 0.0;
}
current = current->next;
}
current = headlayer;
next_to_current = current->next;
int inp_vec_size = current->no_of_neurons;
int desired_label = samples->lables[datactr];
int input_data_ctr = 0;
for (input_data_ctr = 0; input_data_ctr < inp_vec_size; input_data_ctr++)
{
int inputIdx = datactr * inp_vec_size + input_data_ctr;
current->neurons_input[input_data_ctr] = samples->data[inputIdx];
current->neurons_output[input_data_ctr] = samples->data[inputIdx];
}
flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
//convolution layers
int src_fmaps = current->no_of_fmaps;
int dst_fmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel;
int bmargin = floor(fkernel/2);
int imh = current->fmap_height;
int imw = current->fmap_width;
//for the first layer, output = input
int sctr = 0, dctr = 0;
for (dctr = 0; dctr < dst_fmaps; dctr++)
{
for (sctr = 0; sctr < src_fmaps; sctr++)
{
int weights_stidx = dctr * fkernel * fkernel * src_fmaps;
int st_idx = weights_stidx + sctr * fkernel * fkernel;
real_t* filter = NULL;
filter = &(current->weights_matrix[st_idx]);
int fmap_stidx = sctr * imh * imw;
//destination feature map
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
int dst_fmap_stidx = dctr * next_imh * next_imw;
int dst_fmap_ctr = 0;
int hctr = 0;
int wctr = 0;
for (hctr = 0; hctr < imh; hctr++)
{
for (wctr = 0; wctr < imw; wctr++)
{
if ((hctr >= bmargin && wctr >= bmargin) && (hctr < imh - bmargin && wctr < imw - bmargin))
{
int cidx = fmap_stidx + hctr * imw + wctr;
real_t sum = 0.0;
int filterCtr = 0, convCtr1 = 0, convCtr2 = 0;
for (convCtr1 = -1 * floor(current->fkernel/2); convCtr1 <= floor(current->fkernel/2); convCtr1++)
{
for (convCtr2 = -1 * floor(current->fkernel/2); convCtr2 <= floor(current->fkernel/2); convCtr2++)
{
sum = sum + filter[filterCtr] * current->neurons_output[cidx + convCtr1 * imw + convCtr2];
filterCtr++;
}
}
//save summation to destination feature map
int dst_idx = dst_fmap_stidx + dst_fmap_ctr;
next_to_current->neurons_input[dst_idx] += sum;
//applying transfer function
if (sctr == src_fmaps - 1)
{
next_to_current->neurons_input[dst_idx] += current->biases[dctr];
real_t cn = next_to_current->neurons_input[dst_idx];
next_to_current->neurons_input[dst_idx] = 0;
if (current->layer_type == 1)
next_to_current->neurons_output[dst_idx] = sigmoid(cn);
else if (current->layer_type == 2)
{
next_to_current->neurons_output[dst_idx] = htangent(cn);
}
else if (current->layer_type == 3)
next_to_current->neurons_output[dst_idx] = reLUSoftPlus(cn);
}
dst_fmap_ctr++;
}
}
}
}
}
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
int src_layer_size = current->no_of_neurons;
int dst_layer_size = next_to_current->no_of_neurons;
int dcounter = 0;
int scounter = 0;
real_t sum = 0.0;
for (dcounter = 0; dcounter < dst_layer_size; dcounter++)
{
sum = 0.0;
for (scounter = 0; scounter < src_layer_size; scounter++)
{
real_t cweight = 0.0;
cweight = current->weights_matrix[dcounter * src_layer_size + scounter];
real_t xdata = 0;
xdata = current->neurons_output[scounter];
sum += cweight * xdata;
}
next_to_current->neurons_input[dcounter] = sum + current->biases[dcounter];
if (next_to_current->layer_type == 1)
next_to_current->neurons_output[dcounter] = sigmoid(next_to_current->neurons_input[dcounter]);
else if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dcounter] = htangent(next_to_current->neurons_input[dcounter]);
}
else if (next_to_current->layer_type == 3)
next_to_current->neurons_output[dcounter] = reLUSoftPlus(next_to_current->neurons_input[dcounter]);
}
}
else if (current->fkernel == 1 && next_to_current->subsampling == true)
{
// Subsampling goes here ...
int src_fmaps = current->no_of_fmaps;
int imh = current->fmap_height;
int imw = current->fmap_width;
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
int dst_fmap_size = next_imh * next_imw;
int src_fmap_ctr = 0;
for (src_fmap_ctr = 0; src_fmap_ctr < src_fmaps; src_fmap_ctr++)
{
int dst_fmap_ctr = src_fmap_ctr;
int fmap_stidx = src_fmap_ctr * imh * imw;
int next_fmap_stidx = dst_fmap_ctr * dst_fmap_size;
real_t cweight = current->weights_matrix[src_fmap_ctr];
int wctr = 0, hctr = 0;
for (hctr = 0; hctr < imh; hctr += 2)
{
for (wctr = 0; wctr < imw; wctr += 2)
{
int cidx = fmap_stidx + hctr * imw + wctr;
int dhctr = hctr/2;
int dwctr = wctr/2;
int dst_pos = next_fmap_stidx + dhctr * next_imw + dwctr;
real_t p1, p2, p3, p4;
p1 = current->neurons_output[cidx];
p2 = current->neurons_output[cidx + 1];
p3 = current->neurons_output[cidx + imw];
p4 = current->neurons_output[cidx + imw + 1];
//max = pool_max(p1, p2, p3, p4, 4);
real_t sum = (p1 + p2 + p3 + p4);
real_t pooled = 0;
if (current->pool_type == 1)
{
pooled = sum/4;
next_to_current->neurons_input[dst_pos] = current->biases[dst_fmap_ctr];
next_to_current->neurons_input[dst_pos] += pooled * cweight;
}
else if (current->pool_type == 2)
{
int idx = 0;
pooled = pool_max(p1, p2, p3, p4, &idx, 4);
next_to_current->neurons_input[dst_pos] = pooled;
}
else if (current->pool_type == 3)
{
pooled = (p1 + p2 + p3 + p4)/4;
next_to_current->neurons_input[dst_pos] = pooled;
}
if (next_to_current->layer_type == 1)
next_to_current->neurons_output[dst_pos] = sigmoid(next_to_current->neurons_input[dst_pos]);
if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dst_pos] = htangent(next_to_current->neurons_input[dst_pos]);
}
if (next_to_current->layer_type == 3)
next_to_current->neurons_output[dst_pos] = reLUSoftPlus(next_to_current->neurons_input[dst_pos]);
}
}
}
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
//we are at the last layer, so we can compute the misclassification rate here
if (flag == false)
{
int mctr = 0;
real_t max = next_to_current->neurons_output[0];
int maxidx = 0;
for (mctr = 0; mctr < samples->lenlable; mctr++)
{
if (next_to_current->neurons_output[mctr] > max)
{
max = next_to_current->neurons_output[mctr];
maxidx = mctr;
}
}
if(desired_label != maxidx)
mcr++;
}
}
}
return ((real_t) mcr/(real_t)(samples->numVectors) * 100);
}
real_t d_compute_missclassification_rate(cnnlayer_t *headlayer, dataset_t* samples)
{
int d_mcr = 0;
int sampleCtr = 0;
// Random countdowns: when they reach 1, one missed and one correctly classified
// sample are dumped (via display_layer) for visual inspection.
int corr = (float)rand() / (float)RAND_MAX * 1000;
int miss = (float)rand() / (float)RAND_MAX * 100;
for (sampleCtr = 0; sampleCtr < samples->numVectors; sampleCtr++)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
//This is needed as neurons_output accumulates input (+=)
d_reset_output_vectors(headlayer);
int inp_vec_size = current->no_of_neurons;
int desired_label = samples->lables[sampleCtr];
int input_data_ctr = 0;
for (input_data_ctr = 0; input_data_ctr < inp_vec_size; input_data_ctr++)
{
int inputIdx = sampleCtr * inp_vec_size + input_data_ctr;
current->neurons_input[input_data_ctr] = samples->data[inputIdx];
current->neurons_output[input_data_ctr] = samples->data[inputIdx];
}
int outSize = inp_vec_size * sizeof(real_t);
HANDLE_ERROR(hipMemcpy(current->d_neurons_output, current->neurons_output, outSize, hipMemcpyHostToDevice));
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int src_fmaps = current->no_of_fmaps;
int dst_fmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel; // Kernel size
int bmargin = floor(fkernel/2);
int imh = current->fmap_height;
int imw = current->fmap_width;
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
real_t* d_output = next_to_current->d_neurons_output;
real_t* d_input = current->d_neurons_output;
real_t* d_kernel = current->d_weights;
real_t* d_biases = current->d_biases;
dim3 nBlocks(src_fmaps, dst_fmaps, 1); // Grid-dimension -> (src_fmaps * dst_fmaps) blocks
dim3 nThreads(imw, imh, 1); // Block-dimension -> (imw * imh) threads/block
int sh_mem_size = imw * imh * sizeof(real_t) + fkernel * fkernel * sizeof(real_t);
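// Shared memory is sized to hold one imw x imh source feature map plus one
// fkernel x fkernel filter, which is presumably how convolve_device_2D stages its inputs.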
hipLaunchKernelGGL(( convolve_device_2D), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_input, d_kernel, fkernel * fkernel);
hipLaunchKernelGGL(( compute_transfer_function), dim3(dst_fmaps), dim3(next_imw * next_imh) , 0, 0, d_output, d_biases, current->layer_type);
hipDeviceSynchronize();
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
int src_layer_size = current->no_of_neurons;
int dst_layer_size = next_to_current->no_of_neurons;
real_t* d_input = current->d_neurons_output;
real_t* d_output = next_to_current->d_neurons_output;
real_t* d_weights = current->d_weights;
real_t* d_biases = current->d_biases;
dim3 nBlocks(dst_layer_size, 1, 1);
dim3 nThreads(src_layer_size, 1, 1);
int sh_mem_size = (2 * src_layer_size) * sizeof(real_t);
hipLaunchKernelGGL(( d_rear_DNN), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_input, d_weights);
hipLaunchKernelGGL(( compute_transfer_function), dim3(dst_layer_size), dim3(1) , 0, 0, d_output, d_biases, current->layer_type);
hipDeviceSynchronize();
}
else if (next_to_current->subsampling == true)
{
// How to perform average pooling
// Pattern Recognition and Machine Learning, By Christopher M. Bishop (P267)
// ... Each subsampling unit might take inputs from a 2x2 unit region in the
// corresponding feature map and would compute the average of
// those inputs, multiplied by an adaptive weight with the addition of an adaptive bias
// parameter, and then transformed using a sigmoidal non-linear activation function.
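// For average pooling (pool_type == 1) this amounts to y = f(w * (p1 + p2 + p3 + p4)/4 + b),
// with f the transfer function, w the per-map weight and b the bias; d_subsampling is
// expected to apply the same rule on the device.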
real_t* d_input = current->d_neurons_output;
real_t* d_output = next_to_current->d_neurons_output;
int* d_gradientMap = next_to_current->d_gradientMap;
dim3 nBlocks(src_fmaps, 1, 1);
dim3 nThreads(imw, imh, 1);
int sh_mem_size = imw * imh * sizeof(real_t);
hipLaunchKernelGGL(( d_subsampling), dim3(nBlocks), dim3(nThreads), sh_mem_size, 0, d_output, d_input, d_gradientMap, current->layer_type);
hipDeviceSynchronize();
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
if (flag == false)
{
int noutSize = next_to_current->no_of_neurons * sizeof(real_t);
HANDLE_ERROR(hipMemcpy(next_to_current->neurons_output, next_to_current->d_neurons_output, noutSize, hipMemcpyDeviceToHost));
int mctr = 0;
real_t max = next_to_current->neurons_output[0];
int maxidx = 0;
for (mctr = 0; mctr < samples->lenlable; mctr++)
{
if (next_to_current->neurons_output[mctr] > max)
{
max = next_to_current->neurons_output[mctr];
maxidx = mctr;
}
}
if(desired_label != maxidx) {
if (miss == 1) {
display_layer(headlayer);
fprintf(stderr, "\nGround Truth: %d", desired_label);
fprintf(stderr, "\nPredicted: %d\n\n", maxidx);
}
miss--;
d_mcr++;
}
else {
if (corr == 1) {
display_layer(headlayer);
fprintf(stderr, "\nGround Truth: %d", desired_label);
fprintf(stderr, "\nPredicted: %d\n\n", maxidx);
}
corr--;
}
}
}
}
return ((real_t) d_mcr/(real_t)(samples->numVectors) * 100);
}
| 58b75e7f0f6b9201e62eb79b619c58cab09a23af.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "cnn.h"
#include "layer.h"
#include "dataset.h"
#include "batch.h"
#include "debug_extra.h"
#include "error_handling.h"
#include "cudaKernels.h"
#include "timer.h"
double LEARNING_RATE = 0.003;
void writeMetrics(float time, float mse, float mcr)
{
char filename[15] = "Metrics";
FILE *fp = fopen(filename, "a");
if (fp == NULL) {
fprintf(stderr, "Can't open file!\n");
return;
}
fprintf(fp, "%2.6f ", time);
fprintf(fp, "%2.6f ", mse);
fprintf(fp, "%2.6f ", mcr);
fprintf(fp, "\n");
fprintf(stderr, "Writing metrics.. Done\n");
fclose(fp);
}
//1. handle subsampling layer weights and biases
//
int d_feed_forward(cnnlayer_t *headlayer, double *training_data, int *batch_indexes)
{
int batchctr = 0;
for (batchctr = 0; batchctr < BATCH_SIZE; batchctr++)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
int csample = batch_indexes[batchctr];
int inp_vec_size = current->no_of_neurons;
//headlayer's neurons input = output
int input_data_ctr = 0;
for (input_data_ctr = 0; input_data_ctr < inp_vec_size; input_data_ctr++)
{
int first_layer_idx = batchctr * inp_vec_size + input_data_ctr;
current->neurons_input[first_layer_idx] = (training_data + csample * inp_vec_size)[input_data_ctr];
current->neurons_output[first_layer_idx] = (training_data + csample * inp_vec_size)[input_data_ctr];
}
int outIdx = batchctr * inp_vec_size;
int outSize = inp_vec_size * sizeof(real_t);
HANDLE_ERROR(cudaMemcpy(&current->d_neurons_output[outIdx], &current->neurons_output[outIdx], outSize, cudaMemcpyHostToDevice));
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int src_fmaps = current->no_of_fmaps;
int dst_fmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel;
int bmargin = floor(fkernel/2);
int imh = current->fmap_height;
int imw = current->fmap_width;
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
real_t* d_output = next_to_current->d_neurons_output;
real_t* d_input = current->d_neurons_output;
real_t* d_kernel = current->d_weights;
real_t* d_biases = current->d_biases;
dim3 nBlocks(src_fmaps, dst_fmaps, 1);
dim3 nThreads(imw, imh, 1);
int sh_mem_size = imw * imh * sizeof(real_t) + fkernel * fkernel * sizeof(real_t);
convolve_device_2D<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_input, d_kernel, fkernel * fkernel);
compute_transfer_function<<<dst_fmaps, next_imw * next_imh >>>(d_output, d_biases, current->layer_type);
cudaDeviceSynchronize();
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
// fully-connected layer
int src_layer_size = current->no_of_neurons;
int dst_layer_size = next_to_current->no_of_neurons;
real_t* d_input = current->d_neurons_output;
real_t* d_output = next_to_current->d_neurons_output;
real_t* d_weights = current->d_weights;
real_t* d_biases = current->d_biases;
dim3 nBlocks(dst_layer_size, 1, 1);
dim3 nThreads(src_layer_size, 1, 1);
int sh_mem_size = (2 * src_layer_size) * sizeof(real_t);
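// Launch mapping (as suggested by the dimensions above): one block per destination
// neuron, one thread per source neuron; the shared buffer appears to stage the source
// activations together with the matching row of weights.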
d_rear_DNN<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_input, d_weights);
compute_transfer_function<<< dst_layer_size, 1 >>>(d_output, d_biases, current->layer_type);
cudaDeviceSynchronize();
}
else if (next_to_current->subsampling == true)
{
// How to perform average pooling
// Pattern Recognition and Machine Learning, By Christopher M. Bishop (P267)
// ... Each subsampling unit might take inputs from a 2x2 unit region in the
// corresponding feature map and would compute the average of
// those inputs, multiplied by an adaptive weight with the addition of an adaptive bias
// parameter, and then transformed using a sigmoidal non-linear activation function.
real_t* d_input = current->d_neurons_output;
real_t* d_output = next_to_current->d_neurons_output;
int* d_gradientMap = next_to_current->d_gradientMap;
dim3 nBlocks(src_fmaps, 1, 1);
dim3 nThreads(imw, imh, 1);
int sh_mem_size = imw * imh * sizeof(real_t);
d_subsampling<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_input, d_gradientMap, current->layer_type);
cudaDeviceSynchronize();
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
return 0;
}
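// getticks() reads the x86 time-stamp counter via rdtsc and is used below for coarse
// CPU-side timing of the non-GPU training path.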
static __inline__ long long getticks( void )
{
unsigned a, d;
asm volatile("rdtsc" : "=a" (a), "=d" (d));
return ((long long)a) | (((long long)d) << 32);
}
int h_feed_forward(cnnlayer_t *headlayer, double *training_data, int *batch_indexes)
{
int batchctr = 0;
for (batchctr = 0; batchctr < BATCH_SIZE; batchctr++)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
int csample = batch_indexes[batchctr];
int inp_vec_size = current->no_of_neurons;
//headlayer's neurons input = output
int input_data_ctr = 0;
for (input_data_ctr = 0; input_data_ctr < inp_vec_size; input_data_ctr++)
{
int first_layer_idx = batchctr * inp_vec_size + input_data_ctr;
current->neurons_output[first_layer_idx] = (training_data + csample * inp_vec_size)[input_data_ctr];
current->neurons_input[first_layer_idx] = (training_data + csample * inp_vec_size)[input_data_ctr];
}
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int src_fmaps = current->no_of_fmaps;
int dst_fmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel;
int bmargin = floor(fkernel/2);
int imh = current->fmap_height;
int imw = current->fmap_width;
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
//convolution layers
int dst_layer_size = next_imw * next_imh * dst_fmaps;
int src_fmap_ctr = 0, dst_fmap_ctr = 0;
for (dst_fmap_ctr = 0; dst_fmap_ctr < dst_fmaps; dst_fmap_ctr++)
{
for (src_fmap_ctr = 0; src_fmap_ctr < src_fmaps; src_fmap_ctr++)
{
//weights do not involve batch counter
int weights_stidx = src_fmaps * dst_fmap_ctr * fkernel * fkernel;
int st_idx = weights_stidx + src_fmap_ctr * fkernel * fkernel;
real_t* filter = &(current->weights_matrix[st_idx]);
// Source layer feature maps starting index
int fmap_stidx = 0;
int src_layer_size = imh * imw * src_fmaps;
fmap_stidx = batchctr * src_layer_size + src_fmap_ctr * imh * imw;
// Destination (layer) feature map starting index
int dst_fmap_stidx = batchctr * dst_layer_size + dst_fmap_ctr * next_imh * next_imw;
int dst_fmap_unit_ctr = 0;
int hctr = 0;
int wctr = 0;
for (hctr = 0; hctr < imh; hctr++)
{
for (wctr = 0; wctr < imw; wctr++)
{
if ((hctr >= bmargin && wctr >= bmargin) && (hctr < imh - bmargin && wctr < imw - bmargin))
{
// Apply the fkernel x fkernel filter kernel to the input
int cidx = fmap_stidx + hctr * imw + wctr;
real_t sum = 0.0;
int filterCtr = 0, convCtr1 = 0, convCtr2 = 0;
for (convCtr1 = -1 * floor(current->fkernel/2); convCtr1 <= floor(current->fkernel/2); convCtr1++)
{
for (convCtr2 = -1 * floor(current->fkernel/2); convCtr2 <= floor(current->fkernel/2); convCtr2++)
{
sum = sum + filter[filterCtr] * current->neurons_output[cidx + convCtr1 * imw + convCtr2];
filterCtr++;
}
}
//save summation to destination feature map
int dst_idx = dst_fmap_stidx + dst_fmap_unit_ctr;
//next_to_current->neurons_input[dst_idx] += current->biases[dst_fmap_ctr];
next_to_current->neurons_input[dst_idx] += sum;
if (src_fmap_ctr == src_fmaps - 1)
{
next_to_current->neurons_input[dst_idx] += current->biases[dst_fmap_ctr];
real_t cn = next_to_current->neurons_input[dst_idx];
next_to_current->neurons_input[dst_idx] = 0;
if (next_to_current->layer_type == 1)
next_to_current->neurons_output[dst_idx] = sigmoid(cn);
else if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dst_idx] = htangent(cn);
}
else if (next_to_current->layer_type == 3)
next_to_current->neurons_output[dst_idx] = reLUSoftPlus(cn);
}
dst_fmap_unit_ctr++;
}
}
}
}
}
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
int src_layer_size = current->no_of_neurons;
int dst_layer_size = next_to_current->no_of_neurons;
int dcounter = 0;
int scounter = 0;
real_t sum = 0.0;
for (dcounter = 0; dcounter < dst_layer_size; dcounter++)
{
sum = 0.0;
for (scounter = 0; scounter < src_layer_size; scounter++)
{
int sidx = batchctr * src_layer_size + scounter;
real_t cweight = current->weights_matrix[dcounter * src_layer_size + scounter];
real_t xdata = current->neurons_output[sidx];
sum += cweight * xdata;
}
int dst_idx = batchctr * dst_layer_size + dcounter;
next_to_current->neurons_input[dst_idx] = sum + current->biases[dcounter];
if (next_to_current->layer_type == 1)
next_to_current->neurons_output[dst_idx] = sigmoid(next_to_current->neurons_input[dst_idx]);
else if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dst_idx] = htangent(next_to_current->neurons_input[dst_idx]);
}
else if (next_to_current->layer_type == 3)
next_to_current->neurons_output[dst_idx] = reLUSoftPlus(next_to_current->neurons_input[dst_idx]);
}
}
else if (next_to_current->subsampling == true)
{
//Pattern Recognition and Machine Learning, By Christopher M. Bishop (P267)
// ... Each subsampling unit might take inputs from a 2x2 unit region in the
// corresponding feature map and would compute the average (here max pooling) of
// those inputs, multiplied by an adaptive weight with the addition of an adaptive bias
// parameter, and then transformed using a sigmoidal non-linear activation function.
// Subsampling goes here ...
int src_layer_size = imw * imh * src_fmaps;
int dst_fmap_size = next_imh * next_imw;
int dst_layer_size = next_imw * next_imh * dst_fmaps;
int src_fmap_ctr = 0;
for (src_fmap_ctr = 0; src_fmap_ctr < src_fmaps; src_fmap_ctr++)
{
int dst_fmap_ctr = src_fmap_ctr;
int fmap_stidx = batchctr * src_layer_size + src_fmap_ctr * imh * imw;
int next_fmap_stidx = batchctr * dst_layer_size + dst_fmap_ctr * dst_fmap_size;
real_t cweight = current->weights_matrix[src_fmap_ctr];
int wctr, hctr;
for (hctr = 0; hctr < imh; hctr += 2)
{
for (wctr = 0; wctr < imw; wctr += 2)
{
int cidx = fmap_stidx + hctr * imw + wctr;
real_t p01, p02, p03, p04;
p01 = current->neurons_output[cidx];
p02 = current->neurons_output[cidx + 1];
p03 = current->neurons_output[cidx + imw];
p04 = current->neurons_output[cidx + imw + 1];
int dhctr = hctr/2;
int dwctr = wctr/2;
int dst_pos = next_fmap_stidx + dhctr * next_imw + dwctr;
real_t spooling_result = 0; int poolingIdx1 = -1;
real_t pooled = 0;
if (next_to_current->pool_type == 1)
{
// average pooling
pooled = (p01 + p02 + p03 + p04)/4;
next_to_current->neurons_input[dst_pos] = current->biases[dst_fmap_ctr];
next_to_current->neurons_input[dst_pos] += cweight * pooled;
}
else if (next_to_current->pool_type == 2)
{
// max pooling
int idx = 0;
pooled = pool_max(p01, p02, p03, p04, &idx, 4);
if (idx == 0) next_to_current->gradientMap[dst_pos] = cidx;
else if (idx == 1) next_to_current->gradientMap[dst_pos] = cidx + 1;
else if (idx == 2) next_to_current->gradientMap[dst_pos] = cidx + imw;
else if (idx == 3) next_to_current->gradientMap[dst_pos] = cidx + imw + 1;
next_to_current->neurons_input[dst_pos] = pooled;
}
else if (next_to_current->pool_type == 3)
{
spooling_result = stochastic_pooling(p01, p02, p03, p04, &poolingIdx1);
pooled = spooling_result;
if (poolingIdx1 == 0)
next_to_current->gradientMap[dst_pos] = cidx;
else if (poolingIdx1 == 1)
next_to_current->gradientMap[dst_pos] = cidx + 1;
else if (poolingIdx1 == 2)
next_to_current->gradientMap[dst_pos] = cidx + imw;
else if (poolingIdx1 == 3)
next_to_current->gradientMap[dst_pos] = cidx + imw + 1;
next_to_current->neurons_input[dst_pos] = pooled;
}
if (next_to_current->layer_type == 1)
{
next_to_current->neurons_output[dst_pos] = sigmoid(next_to_current->neurons_input[dst_pos]);
}
else if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dst_pos] = htangent(next_to_current->neurons_input[dst_pos]);
}
else if (next_to_current->layer_type == 3)
{
next_to_current->neurons_output[dst_pos] = reLUSoftPlus(next_to_current->neurons_input[dst_pos]);
}
}
}
}
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
return 0;
}
real_t sigmoid(real_t x)
{
real_t y = 0.0;
y = 1.0/(1 + exp(-x));
return y;
}
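// Scaled hyperbolic tangent f(x) = 1.7159 * tanh(2x/3) (the constants match the form
// recommended in LeCun et al., "Efficient BackProp"); dhtangent() returns its derivative
// expressed in terms of the output y: A * S * (1 - (y/A)^2) with A = 1.7159, S = 2/3.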
real_t htangent(real_t x)
{
return 1.7159 * tanh(0.66666 * x);
}
real_t reLUSoftPlus(real_t x)
{
if (x < 0)
return 0;
else
return x;
}
real_t dreLUSoftPlus(real_t x)
{
if (x < 0)
return 0;
else
return 1;
}
real_t dhtangent(real_t y)
{
real_t A = 1.7159;
real_t S = 2.0/3.0;
real_t d = A * S * (1 - y/A) * (1 + y/A);
return d;
}
real_t computeEntropy(real_t a11, real_t a12, real_t a13, real_t a14)
{
real_t sum = a11 + a12 + a13 + a14;
real_t e11 = 0, e12 = 0, e13 = 0, e14 = 0;
if (a11 != 0)
e11 = a11/sum * (real_t) log2((real_t)(a11/sum));
if (a12 != 0)
e12 = a12/sum * log2((real_t)(a12/sum));
if (a13 != 0)
e13 = a13/sum * log2((real_t)(a13/sum));
if (a14 != 0)
e14 = a14/sum * log2((real_t)(a14/sum));
real_t entropy = e11 + e12 + e13 + e14;
return entropy;
}
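// Stochastic pooling over four candidates: softmax probabilities
//   p_i = exp(a_i) / (exp(a_1) + exp(a_2) + exp(a_3) + exp(a_4))
// are turned into a CDF and one activation is sampled with a uniform random draw;
// *poolingIdx reports which of the four inputs was chosen.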
real_t stochastic_pooling(real_t a01, real_t a02, real_t a03, real_t a04, int* poolingIdx)
{
real_t sum = exp(a01) + exp(a02) + exp(a03) + exp(a04);
real_t p01 = exp(a01)/sum;
real_t p02 = exp(a02)/sum;
real_t p03 = exp(a03)/sum;
real_t p04 = exp(a04)/sum;
//cumulative distribution function (CDF)
real_t cdf[4] = {0, 0, 0, 0};
cdf[0] = p01;
cdf[1] = cdf[0] + p02;
cdf[2] = cdf[1] + p03;
cdf[3] = cdf[2] + p04;
real_t randSample = (real_t) rand() / (real_t) RAND_MAX;
if (randSample <= cdf[0])
{
*poolingIdx = 0;
return a01;
}
else if (randSample <= cdf[1])
{
*poolingIdx = 1;
return a02;
}
else if (randSample <= cdf[2])
{
*poolingIdx = 2;
return a03;
}
else
{
*poolingIdx = 3;
return a04;
}
}
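// Per-sample mean squared error against a one-hot target (0/1 for sigmoid and ReLU
// outputs, -1/+1 for the scaled tanh), averaged over the mini-batch:
//   mse = (1/nouts) * SUM_k (d_k - y_k)^2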
real_t compute_mse(struct nnlayer* headlayer, int nouts, int* batch_indexes, unsigned char* lables)
{
struct nnlayer* current = headlayer;
struct nnlayer* lastlayer = NULL;
while (current != NULL)
{
if (current->next == NULL)
lastlayer = current;
current = current->next;
}
current = lastlayer;
int* desired_output = (int *) malloc(nouts * sizeof(int));
real_t mse = 0, avg_mse = 0;
int counter = 0;
for (counter = 0; counter < BATCH_SIZE; counter++)
{
// layer_type = 1 -> sigmoid, 2 -> tanh, 3 -> relu
if (current->layer_type == 1 || current->layer_type == 3 || current->layer_type == 0)
{
int doCtr = 0;
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_output[doCtr] = 0;
}
else if (current->layer_type == 2)
{
int doCtr = 0;
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_output[doCtr] = -1;
}
unsigned char cl = lables[batch_indexes[counter]];
desired_output[cl] = 1;
mse = 0.0;
int nctr = 0;
for (nctr = 0; nctr < nouts; nctr++)
{
real_t error = desired_output[nctr] - current->neurons_output[counter * nouts + nctr];
mse = mse + (error * error);
}
mse = mse/nouts;
avg_mse = avg_mse + mse;
}
free(desired_output);
return avg_mse/BATCH_SIZE;
}
void train_cnn(cnnlayer_t* headlayer, dataset_t* train_samples, dataset_t* test_samples)
{
int epoch_counter = 0;
int max_epoch = 50;
int *batch_indexes = (int *) malloc(sizeof(int) * train_samples->numVectors);
real_t min_mcr = 25.0;
bool_t gpu_turn = 1;
copy_hweights_to_dweights(headlayer);
real_t mcr_test_set = 0;
mcr_test_set = d_compute_missclassification_rate(headlayer, test_samples);
printf("\n ===== BEFORE TRAINING ====");
printf("\n EpochCounter\t\tTEST SET");
printf("\n\n%6d\t\t\t%4.3f", epoch_counter, mcr_test_set);
printf("\n");
while (epoch_counter < max_epoch)
{
int nMcr = 1;
int nvecs = train_samples->numVectors;
int batch_count = nvecs/BATCH_SIZE;
mini_batching(batch_indexes, nvecs, true);
real_t avg_mse = 0;
int nouts = train_samples->lenlable;
long double time = 0.0;
real_t mcr = 0.0;
if (gpu_turn != 0)
{
GpuTimer timer;
long double elapsed = 0.0;
int bctr = 0; // counter in a batch
for (bctr = 0; bctr < batch_count; bctr++)
{
timer.Start();
d_feed_forward(headlayer, train_samples->data, &batch_indexes[bctr * BATCH_SIZE]);
d_compute_gradients_deltas(headlayer, nouts, train_samples->lables, &batch_indexes[bctr * BATCH_SIZE]);
d_update_weights(headlayer);
d_reset_vectors(headlayer);
timer.Stop();
float time_citer = timer.Elapsed();
elapsed += time_citer;
float mse = compute_mse(headlayer, nouts, &batch_indexes[bctr * BATCH_SIZE], train_samples->lables);
avg_mse += mse;
if (bctr % 1000 == 999) {
fprintf(stderr,"\nbctr/batch_count: %d/%d epoch_counter/max_epoch: %d/%d",
bctr + 1, batch_count,
epoch_counter + 1, max_epoch);
}
}
time = elapsed;
fprintf(stderr, "\n elapsed_time: %Lf", elapsed);
}
else
{
fprintf(stderr, "\n Feed Forward via CPU");
long double elapsed = 0.0;
int bctr = 0;
for (bctr = 0; bctr < batch_count; bctr++)
{
long start_ticks = getticks();
h_feed_forward(headlayer, train_samples->data, &batch_indexes[bctr * BATCH_SIZE]);
h_compute_gradients_deltas(headlayer, nouts, train_samples->lables, &batch_indexes[bctr * BATCH_SIZE]);
h_update_weights(headlayer);
reset_inputs_dweights_deltas(headlayer);
long end_ticks = getticks();
float time_citer = (float)(end_ticks - start_ticks)/3330000;
elapsed += time_citer;
float mse = compute_mse(headlayer, nouts, &batch_indexes[bctr * BATCH_SIZE], train_samples->lables);
avg_mse += mse;
if (bctr % 1000 == 999)
fprintf(stderr,"\nbctr/batch_count: %d/%d max_epoch: %d MSE: %.5f", bctr + 1, batch_count, max_epoch, mse);
}
time = elapsed;
fprintf(stderr, "\n elapsed_time: %Lf", elapsed);
}
avg_mse = avg_mse / batch_count;
printf("\n Avg MSE: %f, epoch: %d", avg_mse, epoch_counter);
if (gpu_turn != 0 && epoch_counter % nMcr == 0)
{
copy_dweights_to_hweights(headlayer);
//display_weights_matrices(headlayer);
real_t mcr_test_set = 0;
mcr_test_set = d_compute_missclassification_rate(headlayer, test_samples);
printf("\n =========================");
printf("\n EpochCounter\t\tTEST SET");
printf("\n\n%6d\t\t\t%4.3f", epoch_counter + 1, mcr_test_set);
printf("\n");
mcr = mcr_test_set;
d_reset_output_vectors(headlayer);
if (mcr_test_set < min_mcr)
{
fprintf(stderr,"Writing weight..");
// Buffers sized so that "Checkpoint-" plus the epoch number and the trailing NUL always fit.
char fn[8];
char fname[32] = "Checkpoint-";
sprintf(fn, "%d", epoch_counter);
strcat(fname, fn);
save_trained_network_weights(headlayer, fname);
min_mcr = mcr_test_set;
fprintf(stderr,"\t\tWriting weights done!\n\n");
}
}
else if (gpu_turn == 0 && epoch_counter % nMcr == 0)
{
//display_weights_matrices(headlayer);
real_t mcr_test_set = 0;
mcr_test_set = h_compute_missclassification_rate(headlayer, test_samples);
printf("\n =========================");
printf("\n EpochCounter TEST SET");
printf("\n\n %d %f ", epoch_counter + 1, mcr_test_set);
printf("\n");
mcr = mcr_test_set;
reset_inputs_dweights_deltas(headlayer);
if (mcr_test_set < min_mcr)
{
// Buffers sized so that "Checkpoint-" plus the epoch number and the trailing NUL always fit.
char fn[8];
char fname[32] = "Checkpoint-";
sprintf(fn, "%d", epoch_counter);
strcat(fname, fn);
save_trained_network_weights(headlayer, fname);
min_mcr = mcr_test_set;
}
}
writeMetrics(time, avg_mse, mcr);
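// Exponential learning-rate decay: shrink the global rate by a factor of 0.93 every 5 epochs.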
if (epoch_counter % 5 == 0)
LEARNING_RATE = LEARNING_RATE * 0.93;
epoch_counter++;
}
fprintf(stderr,"\n");
free(batch_indexes);
}
void d_compute_gradients_deltas(cnnlayer_t *headlayer, int nouts, unsigned char* desired_output, int* batch_indexes)
{
int *desired_vec = (int *) malloc(sizeof(int) * nouts);
int batchctr = 0;
for (batchctr = 0; batchctr < BATCH_SIZE; batchctr++)
{
cnnlayer_t* current = headlayer->next;
cnnlayer_t* lastlayer = NULL;
/* Reaching the last layer and propagating error gradients backwards */
while (current != NULL)
{
if (current->next == NULL)
{
lastlayer = current;
break;
}
current = current->next;
}
current = lastlayer;
int num_neurons = current->no_of_neurons, doCtr = 0;
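// Build the one-hot target vector for this sample: 0/1 encoding for sigmoid and ReLU
// output layers, -1/+1 for the scaled tanh.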
if (current->layer_type == 1 || current->layer_type == 3)
{
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_vec[doCtr] = 0;
}
else if (current->layer_type == 2)
{
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_vec[doCtr] = -1;
}
int d_idx = desired_output[batch_indexes[batchctr]];
desired_vec[d_idx] = 1;
int outSize = current->no_of_neurons * BATCH_SIZE * sizeof(real_t);
HANDLE_ERROR(cudaMemcpy(current->neurons_output, current->d_neurons_output, outSize, cudaMemcpyDeviceToHost));
int ectr = 0;
for (ectr = 0; ectr < num_neurons; ectr++)
{
int b_idx = batchctr * num_neurons + ectr;
current->error_deltas[b_idx] = (current->neurons_output[b_idx] - desired_vec[ectr]);
//printf("\n Output: %f", current->neurons_output[b_idx]);
}
// Copy error_deltas to GPU global memory
HANDLE_ERROR(cudaMemcpy(current->d_error_deltas, current->error_deltas, outSize, cudaMemcpyHostToDevice));
current = lastlayer->previous;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (current->previous == NULL)
flag = false;
//back propagate the error deltas from here
int curr_height = current->fmap_height;
int curr_width = current->fmap_width;
int curr_fmap_size = curr_height * curr_width;
int prev_height = lastlayer->fmap_height;
int prev_width = lastlayer->fmap_width;
int prev_fmap_size = prev_height * prev_width;
if (current->fkernel == 1 && lastlayer->subsampling == false)
{
real_t* d_output = current->d_neurons_output;
real_t* d_lerr_deltas = lastlayer->d_error_deltas;
real_t* d_cerr_deltas = current->d_error_deltas;
real_t* d_weights = current->d_weights;
real_t* d_delta_weights = current->d_delta_weights;
real_t* d_delta_biases = current->d_delta_biases;
int nBlocks = lastlayer->no_of_neurons;
int nThreads = current->no_of_neurons;
int sh_mem_size = (2 * current->no_of_neurons + 1) * sizeof(real_t);
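// The two kernels below are expected to mirror the CPU path in h_compute_gradients_deltas():
//   delta_w[p][c] += err_prev[p] * y_curr[c]                       (gradient accumulation)
//   err_curr[c]    = f'(y_curr[c]) * SUM_p w[p][c] * err_prev[p]   (back-propagated delta)
// where p indexes the later (already processed) layer and c the current layer.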
d_rear_DNN_errorbp<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_lerr_deltas, d_cerr_deltas, d_weights, d_delta_weights, d_delta_biases);
sh_mem_size = (current->no_of_neurons) * sizeof(real_t);
d_rear_DNN_update_error_deltas<<<1, nThreads, sh_mem_size>>>(d_output, d_cerr_deltas, current->layer_type);
cudaDeviceSynchronize();
/* For Debugging purpose only */
//int wSize = current->no_of_neurons * lastlayer->no_of_neurons * sizeof(real_t);
//HANDLE_ERROR(cudaMemcpy(current->delta_weights, d_delta_weights, wSize, cudaMemcpyDeviceToHost));
//int nerrSize = current->no_of_neurons * sizeof(real_t);
//HANDLE_ERROR(cudaMemcpy(current->error_deltas, d_cerr_deltas, nerrSize, cudaMemcpyDeviceToHost));
}
else if (current->fkernel != 1 && lastlayer->subsampling == false)
{
real_t* d_output = current->d_neurons_output;
real_t* d_lerr_deltas = lastlayer->d_error_deltas;
real_t* d_cerr_deltas = current->d_error_deltas;
real_t* d_weights = current->d_weights;
real_t* d_delta_weights = current->d_delta_weights;
real_t* d_delta_biases = current->d_delta_biases;
int kerSize = current->fkernel * current->fkernel;
dim3 nBlocks(current->no_of_fmaps, lastlayer->no_of_fmaps, 1);
dim3 nThreads(current->fmap_width, current->fmap_height, 1);
int sh_mem_size = (prev_fmap_size + curr_fmap_size + kerSize + 1) * sizeof(real_t);
errorbp_convolution_layers2<<<nBlocks, nThreads, sh_mem_size >>>(d_output, d_lerr_deltas, d_cerr_deltas, d_weights, d_delta_weights, kerSize);
nBlocks.x = lastlayer->no_of_fmaps; nBlocks.y = 1; nBlocks.z = 1;
nThreads.x = lastlayer->fmap_width; nThreads.y = lastlayer->fmap_height; nThreads.z = 1;
errorbp_convolution_update_biases<<<nBlocks, nThreads>>>(d_lerr_deltas, d_delta_biases);
nBlocks.x = current->no_of_fmaps;
nBlocks.y = nBlocks.z = 1;
nThreads.x = current->fmap_width * current->fmap_height;
nThreads.y = nThreads.z = 1;
sh_mem_size = current->fmap_width * current->fmap_height * sizeof(real_t);
d_update_error_deltas<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_cerr_deltas, current->layer_type);
//int wSize = (current->no_of_fmaps * lastlayer->no_of_fmaps * kerSize) * sizeof(real_t);
//HANDLE_ERROR(cudaMemcpy(current->delta_weights, d_delta_weights, wSize, cudaMemcpyDeviceToHost));
//int nerrSize = current->no_of_neurons * sizeof(real_t);
//HANDLE_ERROR(cudaMemcpy(current->error_deltas, d_cerr_deltas, nerrSize, cudaMemcpyDeviceToHost));
}
else if (lastlayer->subsampling == true)
{
real_t* d_output = current->d_neurons_output;
real_t* d_lerr_deltas = lastlayer->d_error_deltas;
real_t* d_cerr_deltas = current->d_error_deltas;
int* d_gradientMap = lastlayer->d_gradientMap;
dim3 nBlocks(current->no_of_fmaps, 1, 1);
dim3 nThreads(prev_width, prev_height, 1);
int layer_type = current->layer_type;
int sh_mem_size = (2 * prev_width * prev_height) * sizeof(real_t);
d_errbp_subsampling<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_lerr_deltas, d_cerr_deltas, d_gradientMap, layer_type);
cudaDeviceSynchronize();
//int nerrSize = current->no_of_neurons * sizeof(real_t);
//HANDLE_ERROR(cudaMemcpy(current->error_deltas, d_cerr_deltas, nerrSize, cudaMemcpyDeviceToHost));
}
if (flag == true)
{
lastlayer = current;
current = current->previous;
}
}
}
free(desired_vec);
}
void h_compute_gradients_deltas(cnnlayer_t *headlayer, int nouts, unsigned char* desired_output, int* batch_indexes)
{
int *desired_vec = (int *) malloc(sizeof(int) * nouts);
int batchctr = 0;
for (batchctr = 0; batchctr < BATCH_SIZE; batchctr++)
{
cnnlayer_t* current = headlayer->next; //skipped input layer
cnnlayer_t* lastlayer = NULL;
while (current != NULL)
{
int num_neurons = current->no_of_neurons;
int nctr = 0;
for (nctr = 0; nctr < num_neurons; nctr++)
{
int idx = batchctr * num_neurons + nctr;
if (current->layer_type == 1)
current->dy_output[idx] = current->neurons_output[idx] * (1 - current->neurons_output[idx]);
else if (current->layer_type == 2)
current->dy_output[idx] = dhtangent(current->neurons_output[idx]);
else if (current->layer_type == 3)
current->dy_output[idx] = dreLUSoftPlus(current->neurons_output[idx]);
}
if (current->next == NULL)
{
lastlayer = current;
break;
}
current = current->next;
}
current = lastlayer;
// compute error for last layer
int num_neurons = current->no_of_neurons;
if (current->layer_type == 1 || current->layer_type == 3)
{
int doCtr = 0;
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_vec[doCtr] = 0;
}
else if (current->layer_type == 2)
{
int doCtr = 0;
for (doCtr = 0; doCtr < nouts; doCtr++)
desired_vec[doCtr] = -1;
}
int d_idx = desired_output[batch_indexes[batchctr]];
desired_vec[d_idx] = 1;
int ectr = 0;
for (ectr = 0; ectr < num_neurons; ectr++)
{
int b_idx = batchctr * num_neurons + ectr;
current->error_deltas[b_idx] = (current->neurons_output[b_idx] - desired_vec[ectr]);
//printf("\n Output: %f", current->neurons_output[b_idx]);
}
current = lastlayer->previous;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (current->previous == NULL)
flag = false;
//back propagate the error deltas from here
int curr_no_fmaps = current->no_of_fmaps;
int curr_height = current->fmap_height;
int curr_width = current->fmap_width;
int curr_fmap_size = curr_height * curr_width;
int prev_no_fmaps = lastlayer->no_of_fmaps;
int prev_height = lastlayer->fmap_height;
int prev_width = lastlayer->fmap_width;
int prev_fmap_size = prev_height * prev_width;
int no_neurons_prev = prev_fmap_size * prev_no_fmaps;
int no_neurons_curr = curr_fmap_size * curr_no_fmaps;
if (current->fkernel == 1 && lastlayer->subsampling == false)
{
int playerUnitIdx = 0, clayerUnitIdx = 0;
for (clayerUnitIdx = 0; clayerUnitIdx < no_neurons_curr; clayerUnitIdx++)
{
real_t sum = 0.0;
int cUnitIdx = batchctr * no_neurons_curr + clayerUnitIdx;
for (playerUnitIdx = 0; playerUnitIdx < no_neurons_prev; playerUnitIdx++)
{
//for each of the neuron,we have dy_output
int pUnitIdx = batchctr * no_neurons_prev + playerUnitIdx;
int wIdx = playerUnitIdx * no_neurons_curr + clayerUnitIdx;
current->delta_weights[wIdx] += lastlayer->error_deltas[pUnitIdx] * current->neurons_output[cUnitIdx];
sum += lastlayer->error_deltas[pUnitIdx] * current->weights_matrix[wIdx];
}
current->error_deltas[cUnitIdx] = current->dy_output[cUnitIdx] * sum;
//printf("\n dy:%f, sum: %f", current->dy_output[cUnitIdx], sum);
}
for (playerUnitIdx = 0; playerUnitIdx < no_neurons_prev; playerUnitIdx++)
{
current->delta_biases[playerUnitIdx] += lastlayer->error_deltas[playerUnitIdx] * 1.0;
}
}
else if (current->fkernel != 1 && lastlayer->subsampling == false)
{
//convolutional layer with kernel of 5x5
int indexes[25];
int pfmapctr, cfmapctr;
for (cfmapctr = 0; cfmapctr < curr_no_fmaps; cfmapctr++)
{
int curr_fmap_stidx = batchctr * no_neurons_curr + cfmapctr * curr_fmap_size;
int iwstidx = cfmapctr * current->fkernel * current->fkernel;
for (pfmapctr = 0; pfmapctr < prev_no_fmaps; pfmapctr++)
{
int prev_fmap_stidx = batchctr * no_neurons_prev + pfmapctr * prev_fmap_size;
int fwstidx = iwstidx + pfmapctr * (curr_no_fmaps * current->fkernel * current->fkernel);
int i;
for (i = 0; i < prev_fmap_size; i++)
{
int tx, ty;
tx = i % lastlayer->fmap_width;
ty = i / lastlayer->fmap_width;
int bmargin = floor(current->fkernel/2);
int stx, sty;
stx = tx + bmargin;
sty = ty + bmargin;
//in the source fmap
int center = sty * current->fmap_width + stx;
int filterCtr = 0, convCtr1 = 0, convCtr2 = 0;
for (convCtr1 = -1 * floor(current->fkernel/2); convCtr1 <= floor(current->fkernel/2); convCtr1++)
{
for (convCtr2 = -1 * floor(current->fkernel/2); convCtr2 <= floor(current->fkernel/2); convCtr2++)
{
indexes[filterCtr] = center + convCtr1 * current->fmap_width + convCtr2;
filterCtr++;
}
}
int player_idx = prev_fmap_stidx + i;
int iter = 0;
for (iter = 0; iter < current->fkernel * current->fkernel; iter++)
{
int clayer_idx = curr_fmap_stidx + indexes[iter];
int weights_idx = fwstidx + iter;
current->delta_weights[weights_idx] += lastlayer->error_deltas[player_idx] * current->neurons_output[clayer_idx];
current->error_deltas[clayer_idx] += (current->weights_matrix[weights_idx] * lastlayer->error_deltas[player_idx] * current->dy_output[clayer_idx]);
}
if (cfmapctr == 0)
current->delta_biases[pfmapctr] += lastlayer->error_deltas[player_idx] * 1.0;
}
}
}
}
else if (lastlayer->subsampling == true)
{
int sindexes[4];
int pfmapCtr = 0;
for (pfmapCtr = 0; pfmapCtr < prev_no_fmaps; pfmapCtr++)
{
int pstidx = batchctr * no_neurons_prev + pfmapCtr * prev_fmap_size; //0, 25
int cfmapCtr = pfmapCtr;
int cstidx = batchctr * no_neurons_curr + cfmapCtr * curr_fmap_size;
int pfmUnitctr = 0;
for (pfmUnitctr = 0; pfmUnitctr < prev_fmap_size; pfmUnitctr++)
{
int player_idx = pstidx + pfmUnitctr;
int px = pfmUnitctr % lastlayer->fmap_width;
int py = pfmUnitctr / lastlayer->fmap_height;
int sx = px * 2;
int sy = py * 2;
int clUnitIdx = sy * current->fmap_width + sx;
sindexes[0] = cstidx + clUnitIdx;
sindexes[1] = cstidx + clUnitIdx + 1;
sindexes[2] = cstidx + clUnitIdx + curr_width;
sindexes[3] = cstidx + clUnitIdx + curr_width + 1;
if (current->pool_type == 1)
{
int j = 0;
for (j = 0; j < 4; j++)
{
current->delta_weights[cfmapCtr] += lastlayer->error_deltas[player_idx] * current->neurons_output[sindexes[j]];
current->error_deltas[sindexes[j]] = (current->weights_matrix[cfmapCtr] * lastlayer->error_deltas[player_idx]) * current->dy_output[sindexes[j]];
}
current->delta_biases[cfmapCtr] += lastlayer->error_deltas[player_idx] * 1.0;
}
else if (current->pool_type == 2)
{
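// Max pooling: the forward pass recorded in gradientMap which of the four inputs
// produced the maximum, so the error delta is routed back only to that unit.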
int gradientIdx = lastlayer->gradientMap[player_idx];
//current->delta_weights[cfmapCtr] += lastlayer->error_deltas[player_idx] * current->neurons_output[gradientIdx];
current->error_deltas[gradientIdx] = lastlayer->error_deltas[player_idx] * current->dy_output[gradientIdx];
}
else if (current->pool_type == 3)
{
int gradientIdx = lastlayer->gradientMap[player_idx];
current->error_deltas[gradientIdx] = lastlayer->error_deltas[player_idx] * current->dy_output[gradientIdx];
//current->delta_biases[cfmapCtr] += lastlayer->error_deltas[player_idx] * 1.0;
}
}
}
}
if (flag == true)
{
lastlayer = current;
current = current->previous;
}
}
}
free(desired_vec);
}
//accumulate weight deltas
void average_deltas(struct nnlayer* headlayer)
{
if (BATCH_SIZE > 1)
{
struct nnlayer* current = headlayer;
struct nnlayer* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
int no_dweights = current->no_of_fmaps * next_to_current->no_of_fmaps * current->fkernel * current->fkernel;
int cctr = 0;
for (cctr = 0; cctr < no_dweights; cctr++)
{
current->delta_weights[cctr] = current->delta_weights[cctr]/BATCH_SIZE;
}
//biases
for (cctr = 0; cctr < next_to_current->no_of_fmaps; cctr++)
{
current->delta_biases[cctr] = current->delta_biases[cctr]/BATCH_SIZE;
}
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
int curr_count = current->no_of_neurons;
int next_curr_count = next_to_current->no_of_neurons;
int cctr = 0;
int ncctr = 0;
for (cctr = 0; cctr < curr_count; cctr++)
{
for (ncctr = 0; ncctr < next_curr_count; ncctr++)
{
int idx = cctr * next_curr_count + ncctr;
current->delta_weights[idx] = current->delta_weights[idx]/BATCH_SIZE;
}
}
//biases
for (cctr = 0; cctr < next_to_current->no_of_fmaps; cctr++)
{
current->delta_biases[cctr] = current->delta_biases[cctr]/BATCH_SIZE;
}
}
else if (next_to_current->subsampling == true)
{
// Subsampling layer
int count = current->no_of_fmaps;
int counter = 0;
for (counter = 0; counter < count; counter++)
{
current->delta_weights[counter] = current->delta_weights[counter]/BATCH_SIZE;
}
//biases
int cctr = 0;
for (cctr = 0; cctr < next_to_current->no_of_fmaps; cctr++)
{
current->delta_biases[cctr] = current->delta_biases[cctr]/BATCH_SIZE;
}
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
}
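// Plain gradient-descent update on the batch-accumulated gradients:
//   w <- w - LEARNING_RATE * dw,   b <- b - LEARNING_RATE * db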
void h_update_weights(struct nnlayer* headlayer)
{
struct nnlayer* current = headlayer;
struct nnlayer* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int ndweights = 0;
if (next_to_current->subsampling == false)
ndweights = current->no_of_fmaps * next_to_current->no_of_fmaps * current->fkernel * current->fkernel;
else
ndweights = current->no_of_fmaps;
int counter = 0;
for (counter = 0; counter < ndweights; counter++)
{
current->weights_matrix[counter] -= LEARNING_RATE * current->delta_weights[counter];
}
for (counter = 0; counter < next_to_current->no_of_fmaps; counter++)
{
current->biases[counter] -= LEARNING_RATE * current->delta_biases[counter];
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
void d_update_weights(struct nnlayer* headlayer)
{
struct nnlayer* current = headlayer;
struct nnlayer* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
real_t* d_weights = current->d_weights;
real_t* d_delta_weights = current->d_delta_weights;
real_t* d_biases = current->d_biases;
real_t* d_delta_biases = current->d_delta_biases;
if (next_to_current->subsampling == true)
{
int nBlocks = current->no_of_fmaps;
int nThreads = 1;
int sh_mem_size = sizeof(real_t);
d_update_weights_kernel<<<nBlocks, nThreads, sh_mem_size>>>(d_weights, d_delta_weights, LEARNING_RATE);
int nwSize = current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(cudaMemcpy(current->weights_matrix, current->d_weights, nwSize, cudaMemcpyDeviceToHost));
d_update_biases_kernel<<< next_to_current->no_of_fmaps, 1 >>>(d_biases, d_delta_biases, LEARNING_RATE);
HANDLE_ERROR(cudaMemcpy(current->biases, current->d_biases, nwSize, cudaMemcpyDeviceToHost));
}
else
{
dim3 nBlocks(current->no_of_fmaps, next_to_current->no_of_fmaps, 1);
dim3 nThreads(current->fkernel, current->fkernel, 1);
int sh_mem_size = 2 * current->fkernel * current->fkernel * sizeof(real_t);
d_update_weights_kernel<<<nBlocks, nThreads, sh_mem_size>>>(d_weights, d_delta_weights, LEARNING_RATE);
int nwSize = current->no_of_fmaps * next_to_current->no_of_fmaps * current->fkernel * current->fkernel * sizeof(real_t);
HANDLE_ERROR(cudaMemcpy(current->weights_matrix, current->d_weights, nwSize, cudaMemcpyDeviceToHost));
d_update_biases_kernel<<< next_to_current->no_of_fmaps, 1 >>>(d_biases, d_delta_biases, LEARNING_RATE);
int nbSize = next_to_current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(cudaMemcpy(current->biases, current->d_biases, nbSize, cudaMemcpyDeviceToHost));
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
void hd_reset_biases(cnnlayer_t* headlayer)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int nbSize = next_to_current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(cudaMemset(current->d_biases, 0, nbSize));
memset(current->biases, 0, nbSize);
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
}
}
void d_reset_vectors(cnnlayer_t* headlayer)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int fk = current->fkernel;
if (next_to_current->subsampling == true)
{
int nwSize = current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(cudaMemset(current->d_delta_weights, 0, nwSize));
int nbSize = next_to_current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(cudaMemset(current->d_delta_biases, 0, nbSize));
}
else
{
int nwSize = current->no_of_fmaps * next_to_current->no_of_fmaps * fk * fk * sizeof(real_t);
HANDLE_ERROR(cudaMemset(current->d_delta_weights, 0, nwSize));
int nbSize = next_to_current->no_of_fmaps * sizeof(real_t);
HANDLE_ERROR(cudaMemset(current->d_delta_biases, 0, nbSize));
}
int noSize = current->no_of_fmaps * current->fmap_width * current->fmap_height * sizeof(real_t);
HANDLE_ERROR(cudaMemset(current->d_neurons_output, 0, noSize));
HANDLE_ERROR(cudaMemset(current->d_error_deltas, 0, noSize));
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
else
{
//this is the very last layer
int noSize = next_to_current->no_of_neurons * sizeof(real_t);
HANDLE_ERROR(cudaMemset(next_to_current->d_neurons_output, 0, noSize));
HANDLE_ERROR(cudaMemset(next_to_current->d_error_deltas, 0, noSize));
}
}
}
void d_reset_output_vectors(cnnlayer_t* headlayer)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int noSize = current->no_of_neurons * sizeof(real_t);
HANDLE_ERROR(cudaMemset(current->d_neurons_output, 0, noSize));
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
else
{
int noSize = next_to_current->no_of_neurons * sizeof(real_t);
HANDLE_ERROR(cudaMemset(next_to_current->d_neurons_output, 0, noSize));
}
}
}
int sign(real_t n1, real_t n2)
{
if (n1 * n2 > 0)
return 1;
else if (n1 * n2 < 0)
return -1;
else
return 0;
}
void reset_inputs_dweights_deltas(cnnlayer_t* headlayer)
{
//printf("\nreset_dweight_deltas");
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int src_nfmaps = current->no_of_fmaps;
int dst_nfmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel;
int weight_matrix_size = 0, biases_size = 0;
if (next_to_current->subsampling == true)
{
weight_matrix_size = src_nfmaps;
biases_size = dst_nfmaps;
}
else
{
weight_matrix_size = src_nfmaps * dst_nfmaps * fkernel * fkernel;
biases_size = dst_nfmaps;
}
int counter = 0;
for (counter = 0; counter < weight_matrix_size; counter++)
current->delta_weights[counter] = 0.0;
for (counter = 0; counter < biases_size; counter++)
current->delta_biases[counter] = 0.0;
//reset error deltas and neurons_input fields
int reset_size = current->no_of_neurons * BATCH_SIZE;
for (counter = 0; counter < reset_size; counter++)
{
current->error_deltas[counter] = 0.0;
current->neurons_input[counter] = 0.0;
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
// the last layer next_to_current does not collect +=,
// neither neurons_input[] nor error_deltas, but GPU version needs
// it to be cleared
}
}
real_t h_compute_missclassification_rate(cnnlayer_t *headlayer, dataset_t *samples)
{
fprintf(stderr, "\n computing MCR, No. of samples: %d\n Progress: ", samples->numVectors);
int mcr = 0;
int datactr = 0;
for (datactr = 0; datactr < samples->numVectors; datactr++)
{
if (datactr % 1000 == 0)
fprintf(stderr, ".");
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
bool_t flag = true;
while (current != NULL)
{
int resetctr = 0;
for (resetctr = 0; resetctr < current->no_of_neurons; resetctr++)
{
current->neurons_input[resetctr] = 0.0;
current->neurons_output[resetctr] = 0.0;
}
current = current->next;
}
current = headlayer;
next_to_current = current->next;
int inp_vec_size = current->no_of_neurons;
int desired_label = samples->lables[datactr];
int input_data_ctr = 0;
for (input_data_ctr = 0; input_data_ctr < inp_vec_size; input_data_ctr++)
{
int inputIdx = datactr * inp_vec_size + input_data_ctr;
current->neurons_input[input_data_ctr] = samples->data[inputIdx];
current->neurons_output[input_data_ctr] = samples->data[inputIdx];
}
flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
//convolution layers
int src_fmaps = current->no_of_fmaps;
int dst_fmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel;
int bmargin = floor(fkernel/2);
int imh = current->fmap_height;
int imw = current->fmap_width;
//for the first layer, output = input
int sctr = 0, dctr = 0;
for (dctr = 0; dctr < dst_fmaps; dctr++)
{
for (sctr = 0; sctr < src_fmaps; sctr++)
{
int weights_stidx = dctr * fkernel * fkernel * src_fmaps;
int st_idx = weights_stidx + sctr * fkernel * fkernel;
real_t* filter = NULL;
filter = &(current->weights_matrix[st_idx]);
int fmap_stidx = sctr * imh * imw;
//destination feature map
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
int dst_fmap_stidx = dctr * next_imh * next_imw;
int dst_fmap_ctr = 0;
int hctr = 0;
int wctr = 0;
for (hctr = 0; hctr < imh; hctr++)
{
for (wctr = 0; wctr < imw; wctr++)
{
if ((hctr >= bmargin && wctr >= bmargin) && (hctr < imh - bmargin && wctr < imw - bmargin))
{
int cidx = fmap_stidx + hctr * imw + wctr;
real_t sum = 0.0;
int filterCtr = 0, convCtr1 = 0, convCtr2 = 0;
for (convCtr1 = -1 * floor(current->fkernel/2); convCtr1 <= floor(current->fkernel/2); convCtr1++)
{
for (convCtr2 = -1 * floor(current->fkernel/2); convCtr2 <= floor(current->fkernel/2); convCtr2++)
{
sum = sum + filter[filterCtr] * current->neurons_output[cidx + convCtr1 * imw + convCtr2];
filterCtr++;
}
}
//save summation to destination feature map
int dst_idx = dst_fmap_stidx + dst_fmap_ctr;
next_to_current->neurons_input[dst_idx] += sum;
//applying transfer function
if (sctr == src_fmaps - 1)
{
next_to_current->neurons_input[dst_idx] += current->biases[dctr];
real_t cn = next_to_current->neurons_input[dst_idx];
next_to_current->neurons_input[dst_idx] = 0;
if (current->layer_type == 1)
next_to_current->neurons_output[dst_idx] = sigmoid(cn);
else if (current->layer_type == 2)
{
next_to_current->neurons_output[dst_idx] = htangent(cn);
}
else if (current->layer_type == 3)
next_to_current->neurons_output[dst_idx] = reLUSoftPlus(cn);
}
dst_fmap_ctr++;
}
}
}
}
}
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
int src_layer_size = current->no_of_neurons;
int dst_layer_size = next_to_current->no_of_neurons;
int dcounter = 0;
int scounter = 0;
real_t sum = 0.0;
for (dcounter = 0; dcounter < dst_layer_size; dcounter++)
{
sum = 0.0;
for (scounter = 0; scounter < src_layer_size; scounter++)
{
real_t cweight = 0.0;
cweight = current->weights_matrix[dcounter * src_layer_size + scounter];
real_t xdata = 0;
xdata = current->neurons_output[scounter];
sum += cweight * xdata;
}
next_to_current->neurons_input[dcounter] = sum + current->biases[dcounter];
if (next_to_current->layer_type == 1)
next_to_current->neurons_output[dcounter] = sigmoid(next_to_current->neurons_input[dcounter]);
else if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dcounter] = htangent(next_to_current->neurons_input[dcounter]);
}
else if (next_to_current->layer_type == 3)
next_to_current->neurons_output[dcounter] = reLUSoftPlus(next_to_current->neurons_input[dcounter]);
}
}
else if (current->fkernel == 1 && next_to_current->subsampling == true)
{
// Subsampling goes here ...
int src_fmaps = current->no_of_fmaps;
int imh = current->fmap_height;
int imw = current->fmap_width;
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
int dst_fmap_size = next_imh * next_imw;
int src_fmap_ctr = 0;
for (src_fmap_ctr = 0; src_fmap_ctr < src_fmaps; src_fmap_ctr++)
{
int dst_fmap_ctr = src_fmap_ctr;
int fmap_stidx = src_fmap_ctr * imh * imw;
int next_fmap_stidx = dst_fmap_ctr * dst_fmap_size;
real_t cweight = current->weights_matrix[src_fmap_ctr];
int wctr = 0, hctr = 0;
for (hctr = 0; hctr < imh; hctr += 2)
{
for (wctr = 0; wctr < imw; wctr += 2)
{
int cidx = fmap_stidx + hctr * imw + wctr;
int dhctr = hctr/2;
int dwctr = wctr/2;
int dst_pos = next_fmap_stidx + dhctr * next_imw + dwctr;
real_t p1, p2, p3, p4;
p1 = current->neurons_output[cidx];
p2 = current->neurons_output[cidx + 1];
p3 = current->neurons_output[cidx + imw];
p4 = current->neurons_output[cidx + imw + 1];
//max = pool_max(p1, p2, p3, p4, 4);
real_t sum = (p1 + p2 + p3 + p4);
real_t pooled = 0;
if (current->pool_type == 1)
{
pooled = sum/4;
next_to_current->neurons_input[dst_pos] = current->biases[dst_fmap_ctr];
next_to_current->neurons_input[dst_pos] += pooled * cweight;
}
else if (current->pool_type == 2)
{
int idx = 0;
pooled = pool_max(p1, p2, p3, p4, &idx, 4);
next_to_current->neurons_input[dst_pos] = pooled;
}
else if (current->pool_type == 3)
{
pooled = (p1 + p2 + p3 + p4)/4;
next_to_current->neurons_input[dst_pos] = pooled;
}
if (next_to_current->layer_type == 1)
next_to_current->neurons_output[dst_pos] = sigmoid(next_to_current->neurons_input[dst_pos]);
if (next_to_current->layer_type == 2)
{
next_to_current->neurons_output[dst_pos] = htangent(next_to_current->neurons_input[dst_pos]);
}
if (next_to_current->layer_type == 3)
next_to_current->neurons_output[dst_pos] = reLUSoftPlus(next_to_current->neurons_input[dst_pos]);
}
}
}
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
//we are at the last layer, so we can compute the misclassification rate here
if (flag == false)
{
int mctr = 0;
real_t max = next_to_current->neurons_output[0];
int maxidx = 0;
for (mctr = 0; mctr < samples->lenlable; mctr++)
{
if (next_to_current->neurons_output[mctr] > max)
{
max = next_to_current->neurons_output[mctr];
maxidx = mctr;
}
}
if(desired_label != maxidx)
mcr++;
}
}
}
return ((real_t) mcr/(real_t)(samples->numVectors) * 100);
}
real_t d_compute_missclassification_rate(cnnlayer_t *headlayer, dataset_t* samples)
{
int d_mcr = 0;
int sampleCtr = 0;
// Random countdowns: when they reach 1, one missed and one correctly classified
// sample are dumped (via display_layer) for visual inspection.
int corr = (float)rand() / (float)RAND_MAX * 1000;
int miss = (float)rand() / (float)RAND_MAX * 100;
for (sampleCtr = 0; sampleCtr < samples->numVectors; sampleCtr++)
{
cnnlayer_t* current = headlayer;
cnnlayer_t* next_to_current = current->next;
//This is needed as neurons_output accumulates input (+=)
d_reset_output_vectors(headlayer);
int inp_vec_size = current->no_of_neurons;
int desired_label = samples->lables[sampleCtr];
int input_data_ctr = 0;
for (input_data_ctr = 0; input_data_ctr < inp_vec_size; input_data_ctr++)
{
int inputIdx = sampleCtr * inp_vec_size + input_data_ctr;
current->neurons_input[input_data_ctr] = samples->data[inputIdx];
current->neurons_output[input_data_ctr] = samples->data[inputIdx];
}
int outSize = inp_vec_size * sizeof(real_t);
HANDLE_ERROR(cudaMemcpy(current->d_neurons_output, current->neurons_output, outSize, cudaMemcpyHostToDevice));
bool_t flag = true;
while (current != NULL && flag == true)
{
if (next_to_current->next == NULL)
flag = false;
int src_fmaps = current->no_of_fmaps;
int dst_fmaps = next_to_current->no_of_fmaps;
int fkernel = current->fkernel; // Kernel size
int bmargin = floor(fkernel/2);
int imh = current->fmap_height;
int imw = current->fmap_width;
int next_imw = next_to_current->fmap_width;
int next_imh = next_to_current->fmap_height;
if (next_to_current->subsampling == false && current->fkernel != 1)
{
real_t* d_output = next_to_current->d_neurons_output;
real_t* d_input = current->d_neurons_output;
real_t* d_kernel = current->d_weights;
real_t* d_biases = current->d_biases;
dim3 nBlocks(src_fmaps, dst_fmaps, 1); // Grid-dimension -> (src_fmaps * dst_fmaps) blocks
dim3 nThreads(imw, imh, 1); // Block-dimension -> (imw * imh) threads/block
int sh_mem_size = imw * imh * sizeof(real_t) + fkernel * fkernel * sizeof(real_t);
convolve_device_2D<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_input, d_kernel, fkernel * fkernel);
compute_transfer_function<<<dst_fmaps, next_imw * next_imh >>>(d_output, d_biases, current->layer_type);
cudaDeviceSynchronize();
}
else if (next_to_current->subsampling == false && current->fkernel == 1)
{
int src_layer_size = current->no_of_neurons;
int dst_layer_size = next_to_current->no_of_neurons;
real_t* d_input = current->d_neurons_output;
real_t* d_output = next_to_current->d_neurons_output;
real_t* d_weights = current->d_weights;
real_t* d_biases = current->d_biases;
dim3 nBlocks(dst_layer_size, 1, 1);
dim3 nThreads(src_layer_size, 1, 1);
int sh_mem_size = (2 * src_layer_size) * sizeof(real_t);
d_rear_DNN<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_input, d_weights);
compute_transfer_function<<< dst_layer_size, 1 >>>(d_output, d_biases, current->layer_type);
cudaDeviceSynchronize();
}
else if (next_to_current->subsampling == true)
{
// How to perform average pooling
// Pattern Recognition and Machine Learning, By Christopher M. Bishop (P267)
// ... Each subsampling unit might take inputs from a 2x2 unit region in the
// corresponding feature map and would compute the average of
// those inputs, multiplied by an adaptive weight with the addition of an adaptive bias
// parameter, and then transformed using a sigmoidal non-linear activation function.
real_t* d_input = current->d_neurons_output;
real_t* d_output = next_to_current->d_neurons_output;
int* d_gradientMap = next_to_current->d_gradientMap;
dim3 nBlocks(src_fmaps, 1, 1);
dim3 nThreads(imw, imh, 1);
int sh_mem_size = imw * imh * sizeof(real_t);
d_subsampling<<<nBlocks, nThreads, sh_mem_size>>>(d_output, d_input, d_gradientMap, current->layer_type);
cudaDeviceSynchronize();
}
if (flag == true)
{
current = next_to_current;
next_to_current = current->next;
}
if (flag == false)
{
int noutSize = next_to_current->no_of_neurons * sizeof(real_t);
HANDLE_ERROR(cudaMemcpy(next_to_current->neurons_output, next_to_current->d_neurons_output, noutSize, cudaMemcpyDeviceToHost));
int mctr = 0;
real_t max = next_to_current->neurons_output[0];
int maxidx = 0;
for (mctr = 0; mctr < samples->lenlable; mctr++)
{
if (next_to_current->neurons_output[mctr] > max)
{
max = next_to_current->neurons_output[mctr];
maxidx = mctr;
}
}
if(desired_label != maxidx) {
if (miss == 1) {
display_layer(headlayer);
fprintf(stderr, "\nGround Truth: %d", desired_label);
fprintf(stderr, "\nPredicted: %d\n\n", maxidx);
}
miss--;
d_mcr++;
}
else {
if (corr == 1) {
display_layer(headlayer);
fprintf(stderr, "\nGround Truth: %d", desired_label);
fprintf(stderr, "\nPredicted: %d\n\n", maxidx);
}
corr--;
}
}
}
}
return ((real_t) d_mcr/(real_t)(samples->numVectors) * 100);
}
|
fd45ba009ef7efa8073662ff46f2005483165ba6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#define TILE_DIM 16
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// 1. Naive: only use hipblasSgemm: nn or tn
// 2. Force: force to use transpose + nn if there is enough memory
// 3. Linear model: some cases: transpose+nn, other cases: tn
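// Below: for large GEMMs where A is not transposed and B is transposed, a learned
// predictor (xgPredict) picks between the explicit-transpose path (caffe_gpu_gemm_tn)
// and the plain TN Sgemm call.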
if (M > 1024 && N > 1024 && K > 1024 && cuTransA == HIPBLAS_OP_N && cuTransB == HIPBLAS_OP_T) {
//size_t availableMemory, totalMemory;
//hipMemGetInfo(&availableMemory, &totalMemory);
//size_t neededMemory = sizeof(float) * N * K;
//if (availableMemory > neededMemory) {
//double label = Caffe::predict(K, M, N);
float label = Caffe::Get().xgPredict(K, M, N);
if (label < 0.5) {
caffe_gpu_gemm_tn(M, N, K, alpha, A, B, beta, C);
} else {
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
//} else {
// CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
// N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
//}
} else {
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
}
// Customized by COMP@HKBU
// Authors: Pengfei Xu, Shaohuai Shi
template <typename Dtype>
__global__ void caffe_gpu_transpose(float *dmt, float *dm, int w, int h)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int old_idx = row*w + col;
int new_idx = col*h + row;
if(row < h && col < w){
tile[threadIdx.y][threadIdx.x] = dm[old_idx];
__syncthreads();
dmt[new_idx] = tile[threadIdx.y][threadIdx.x];
}
}
// Compute C = alpha * A * B^T + beta * C
// Size of A: M * K, WA=K, HA=M
// Size of B: N * K, WB=K, HB=N
// Size of C: M * N, WC=N, HC=M
void caffe_gpu_gemm_tn(const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Do the transposition first before calling cublas
//int size_x, size_y;
//size_x = K;
//size_y = N;
//dim3 gridt((size_x - 1)/TILE_DIM + 1, (size_y - 1)/TILE_DIM + 1), blockt(TILE_DIM, TILE_DIM);
float *d_BT;
CUDA_CHECK(hipMalloc((void **) &d_BT, K * N * sizeof(float)));
//caffe_gpu_transpose<float><<<gridt, blockt>>>(d_BT, (float *)B, size_x, size_y);
const float alpha1 = 1.0f;
const float beta1 = 0.0f;
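// Materialize B^T into d_BT with a geam call (beta1 = 0, so the second operand is
// ignored), then run a plain NN GEMM on the transposed copy below.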
CUBLAS_CHECK(hipblasSgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_T,
N, K,
&alpha1, B, K,
&beta1, NULL, K,
d_BT, N));
//int lda = K;
//int ldb = N;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N,
N, M, K, &alpha, d_BT, N, A, K, &beta, C, N));
CUDA_CHECK(hipFree(d_BT));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
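// Caffe stores A row-major while cuBLAS/hipBLAS expect column-major: the M x N
// row-major buffer is read as its N x M transpose, so the transpose flag is
// flipped and the dimensions/leading dimension are passed as (N, M) / N.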
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
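// Y = alpha * X + beta * Y, implemented as a scal (Y *= beta) followed by an axpy (Y += alpha * X).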
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
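// sign(x) evaluates to -1, 0, or +1; sgnbit(x) is true when the sign bit is set
// (negative values and -0.0).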
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
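// The raw uniform values lie in the unit interval; scaling by (b - a) and adding a
// maps them onto [a, b), with each step skipped when it would be a no-op.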
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| fd45ba009ef7efa8073662ff46f2005483165ba6.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#define TILE_DIM 16
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// 1. Naive: only use cublasSgemm: nn or tn
// 2. Force: force to use transpose + nn if there is enough memory
// 3. Linear model: some cases: transpose+nn, other cases: tn
if (M > 1024 && N > 1024 && K > 1024 && cuTransA == CUBLAS_OP_N && cuTransB == CUBLAS_OP_T) {
//size_t availableMemory, totalMemory;
//cudaMemGetInfo(&availableMemory, &totalMemory);
//size_t neededMemory = sizeof(float) * N * K;
//if (availableMemory > neededMemory) {
//double label = Caffe::predict(K, M, N);
float label = Caffe::Get().xgPredict(K, M, N);
if (label < 0.5) {
caffe_gpu_gemm_tn(M, N, K, alpha, A, B, beta, C);
} else {
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
//} else {
// CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
// N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
//}
} else {
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
}
// Customized by COMP@HKBU
// Authors: Pengfei Xu, Shaohuai Shi
template <typename Dtype>
__global__ void caffe_gpu_transpose(float *dmt, float *dm, int w, int h)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int old_idx = row*w + col;
int new_idx = col*h + row;
if(row < h && col < w){
tile[threadIdx.y][threadIdx.x] = dm[old_idx];
__syncthreads();
dmt[new_idx] = tile[threadIdx.y][threadIdx.x];
}
}
// Compute C = alpha * A * B^T + beta * C
// Size of A: M * K, WA=K, HA=M
// Size of B: N * K, WB=K, HB=N
// Size of C: M * N, WC=N, HC=M
void caffe_gpu_gemm_tn(const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Do the transposition first before calling cublas
//int size_x, size_y;
//size_x = K;
//size_y = N;
//dim3 gridt((size_x - 1)/TILE_DIM + 1, (size_y - 1)/TILE_DIM + 1), blockt(TILE_DIM, TILE_DIM);
float *d_BT;
CUDA_CHECK(cudaMalloc((void **) &d_BT, K * N * sizeof(float)));
//caffe_gpu_transpose<float><<<gridt, blockt>>>(d_BT, (float *)B, size_x, size_y);
const float alpha1 = 1.0f;
const float beta1 = 0.0f;
CUBLAS_CHECK(cublasSgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_T,
N, K,
&alpha1, B, K,
&beta1, NULL, K,
d_BT, N));
//int lda = K;
//int ldb = N;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N,
N, M, K, &alpha, d_BT, N, A, K, &beta, C, N));
CUDA_CHECK(cudaFree(d_BT));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
Sigmoid.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if USE_ROCM
#include "cuda/Common.h"
#include "activation/Sigmoid.h"
namespace nn {
namespace activation {
__device__ float sigmoid(float x) {
return 1.0f / (1.0f + exp(-x));
}
__global__ void SigmoidForwardImpl(const int N, const float* X, float* Y) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N) {
Y[i] = sigmoid(X[i]);
}
}
__global__ void SigmoidBackpropImpl(const int N, const float* dFdY, const float* Y, float* dFdX) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N) {
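// sigmoid'(x) = y * (1 - y), so the upstream gradient dFdY is scaled by Y * (1 - Y).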
dFdX[i] = dFdY[i] * Y[i] * (1.0f - Y[i]);
}
}
template <>
void Sigmoid<Cuda>::Forward(const Matrix<Cuda>& X) {
const auto& shape = X.GetShape();
if((this->_Y.GetShape().cols != shape.cols)) {
throw Exception("Sigmoid forward: wrong matrix shape");
}
const auto N = shape.Size();
this->_Y.Reshape(shape);
this->_dFdX.Reshape(shape);
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((N + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( SigmoidForwardImpl), dim3(num_of_blocks), dim3(block_size), 0, 0,
N, X.DeviceData(), this->_Y.DeviceData());
Exception::ThrowOnError("Sigmoid: cannot perform forward propagation");
}
template <>
void Sigmoid<Cuda>::Backprop(const Matrix<Cuda>& X, const Matrix<Cuda>& dFdY, const float /*learning_rate*/) {
const auto& shape = X.GetShape();
if((shape.cols != dFdY.GetShape().cols) || (shape.rows != dFdY.GetShape().rows) ||
(shape.cols != this->_Y.GetShape().cols) || (shape.rows != this->_Y.GetShape().rows)) {
throw Exception("Sigmoid backprop: wrong matrix shape");
}
const auto N = shape.Size();
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((N + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( SigmoidBackpropImpl), dim3(num_of_blocks), dim3(block_size), 0, 0,
N, dFdY.DeviceData(), this->_Y.DeviceData(), this->_dFdX.DeviceData());
Exception::ThrowOnError("Sigmoid: cannot perform back propagation");
}
} //namespace activation
} //namespace nn
#endif //USE_ROCM
| Sigmoid.cu | #if USE_CUDA
#include "cuda/Common.h"
#include "activation/Sigmoid.h"
namespace nn {
namespace activation {
__device__ float sigmoid(float x) {
return 1.0f / (1.0f + exp(-x));
}
__global__ void SigmoidForwardImpl(const int N, const float* X, float* Y) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N) {
Y[i] = sigmoid(X[i]);
}
}
__global__ void SigmoidBackpropImpl(const int N, const float* dFdY, const float* Y, float* dFdX) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N) {
dFdX[i] = dFdY[i] * Y[i] * (1.0f - Y[i]);
}
}
template <>
void Sigmoid<Cuda>::Forward(const Matrix<Cuda>& X) {
const auto& shape = X.GetShape();
if((this->_Y.GetShape().cols != shape.cols)) {
throw Exception("Sigmoid forward: wrong matrix shape");
}
const auto N = shape.Size();
this->_Y.Reshape(shape);
this->_dFdX.Reshape(shape);
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((N + block_size.x - 1) / block_size.x);
SigmoidForwardImpl<<<num_of_blocks, block_size>>>
(N, X.DeviceData(), this->_Y.DeviceData());
Exception::ThrowOnError("Sigmoid: cannot perform forward propagation");
}
template <>
void Sigmoid<Cuda>::Backprop(const Matrix<Cuda>& X, const Matrix<Cuda>& dFdY, const float /*learning_rate*/) {
const auto& shape = X.GetShape();
if((shape.cols != dFdY.GetShape().cols) || (shape.rows != dFdY.GetShape().rows) ||
(shape.cols != this->_Y.GetShape().cols) || (shape.rows != this->_Y.GetShape().rows)) {
throw Exception("Sigmoid backprop: wrong matrix shape");
}
const auto N = shape.Size();
dim3 block_size(kVectorBlockSize);
dim3 num_of_blocks((N + block_size.x - 1) / block_size.x);
SigmoidBackpropImpl<<<num_of_blocks, block_size>>>
(N, dFdY.DeviceData(), this->_Y.DeviceData(), this->_dFdX.DeviceData());
Exception::ThrowOnError("Sigmoid: cannot perform back propagation");
}
} //namespace activation
} //namespace nn
#endif //USE_CUDA
|
537925d30f7e63c7b9e718f065f4128e01394195.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 3049 $
// $Date: 2007-02-26 10:42:36 -0800 (Mon, 26 Feb 2007) $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* compact_app.cu
*
* @brief CUDPP application-level compact routines
*/
#include "cudpp_globals.h"
#include "cudpp_util.h"
#include "cudpp.h"
#include "cudpp_plan.h"
#include "cudpp_scan.h"
#include "kernel/compact_kernel.cu"
#include <cutil.h>
#include <cstdlib>
#include <cstdio>
#include <assert.h>
/** \addtogroup cudpp_app
* @{
*/
/** @name Compact Functions
* @{
*/
/** @brief Calculate launch parameters for compactArray().
*
* Calculates the block size and number of blocks from the total
* number of elements and the maximum threads per block. Called by
* compactArray().
*
* The calculation is pretty straightforward - the number of blocks
* is calculated by dividing the number of input elements by the product
* of the number of threads in each CTA and the number of elements each thread
* will process. numThreads and numEltsPerBlock are also simple to
* calculate. Please note that in cases where numElements is not an exact
 * multiple of SCAN_ELTS_PER_THREAD * CTA_SIZE, some threads will do
 * nothing, or one thread will process fewer than
 * SCAN_ELTS_PER_THREAD elements.
*
*
* @param[in] numElements Number of elements to sort
* @param[out] numThreads Number of threads in each block
* @param[out] numBlocks Number of blocks
* @param[out] numEltsPerBlock Number of elements processed per block
*
*/
void calculatCompactLaunchParams(const unsigned int numElements,
unsigned int &numThreads,
unsigned int &numBlocks,
unsigned int &numEltsPerBlock)
{
numBlocks =
max(1, (int)ceil((float)numElements /
((float)SCAN_ELTS_PER_THREAD * CTA_SIZE)));
if (numBlocks > 1)
{
numThreads = CTA_SIZE;
}
else
{
numThreads = (unsigned int)ceil((float)numElements / (float)SCAN_ELTS_PER_THREAD);
}
numEltsPerBlock = numThreads * SCAN_ELTS_PER_THREAD;
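// Worked example with illustrative values only (the real constants live in
// cudpp_globals.h): if CTA_SIZE were 256 and SCAN_ELTS_PER_THREAD were 8, then
// numElements = 1000000 would give numBlocks = 489, numThreads = 256,
// numEltsPerBlock = 2048.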
}
/** @brief Compact the non-zero elements of an array.
*
* Given an input array \a d_in, compactArray() outputs a compacted version
 * which does not have null (zero) elements. Also outputs the number of non-zero
* elements in the compacted array. Called by ::cudppCompactDispatch().
*
* The algorithm is straightforward, involving two steps (most of the
* complexity is hidden in scan, invoked with cudppScanDispatch() ).
*
* -# scanArray() performs a prefix sum on \a d_isValid to compute output
* indices.
* -# compactData() takes \a d_in and an intermediate array of output indices
* as input and writes the values with valid flags in \a d_isValid into
* \a d_out using the output indices.
*
* @param[out] d_out Array of compacted non-null elements
* @param[out] d_numValidElements Pointer to unsigned int to store number of
* non-null elements
* @param[in] d_in Input array
* @param[out] d_isValid Array of flags, 1 for each non-null element, 0
* for each null element. Same length as \a d_in
* @param[in] numElements Number of elements in input array
* @param[in] stream The stream to execute the kernel on
* @param[in] plan Pointer to the plan object used for this compact
*
*/
template<class T>
void compactArray(T *d_out,
unsigned int *d_numValidElements,
const T *d_in,
const unsigned int *d_isValid,
size_t numElements,
const hipStream_t stream,
const CUDPPCompactPlan *plan)
{
unsigned int numThreads = 0;
unsigned int numBlocks = 0;
unsigned int numEltsPerBlock = 0;
// Calculate CUDA launch parameters - number of blocks, number of threads
// @todo What is numEltsPerBlock doing here?
calculatCompactLaunchParams(numElements, numThreads, numBlocks, numEltsPerBlock);
// Run prefix sum on isValid array to find the addresses in the compacted
// output array where each non-null element of d_in will go to
cudppScanDispatch((void*)plan->m_d_outputIndices, (void*)d_isValid,
numElements, 1, stream, plan->m_scanPlan);
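// Sketch (assuming an exclusive scan of the flags): isValid = [1,0,1,1,0] gives
// m_d_outputIndices = [0,1,1,2,3]; compactData then writes each flagged d_in[i]
// to d_out[outputIndices[i]], producing [x0, x2, x3] and numValidElements = 3.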
// For every non-null element in d_in write it to its proper place in the
// d_out. This is indicated by the corresponding element in isValid array
if (plan->m_config.options & CUDPP_OPTION_BACKWARD)
hipLaunchKernelGGL(( compactData<T, true>), dim3(numBlocks), dim3(numThreads), 0, stream, d_out,
d_numValidElements,
plan->m_d_outputIndices,
d_isValid, d_in, numElements);
else
hipLaunchKernelGGL(( compactData<T, false>), dim3(numBlocks), dim3(numThreads), 0, stream, d_out,
d_numValidElements,
plan->m_d_outputIndices,
d_isValid, d_in, numElements);
CUT_CHECK_ERROR("compactArray -- compactData");
}
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Allocate intermediate arrays used by cudppCompact().
*
* In addition to the internal CUDPPScanPlan contained in CUDPPCompactPlan,
* CUDPPCompact also needs a temporary device array of output indices, which
* is allocated by this function.
*
* @param plan Pointer to CUDPPCompactPlan object within which intermediate
* storage is allocated.
*/
void allocCompactStorage(CUDPPCompactPlan *plan)
{
CUDA_SAFE_CALL( hipMalloc((void**)&plan->m_d_outputIndices, sizeof(unsigned int) * plan->m_numElements) );
}
/** @brief Deallocate intermediate storage used by cudppCompact().
*
* Deallocates the output indices array allocated by allocCompactStorage().
*
* @param plan Pointer to CUDPPCompactPlan object initialized by allocCompactStorage().
*/
void freeCompactStorage(CUDPPCompactPlan *plan)
{
CUDA_SAFE_CALL( hipFree(plan->m_d_outputIndices));
}
/** @brief Dispatch compactArray for the specified datatype.
*
* A thin wrapper on top of compactArray which calls compactArray() for the data type
* specified in \a config. This is the app-level interface to compact used by
* cudppCompact().
*
* @param[out] d_out Compacted array of non-zero elements
* @param[out] d_numValidElements Pointer to an unsigned int to store the
* number of non-zero elements
* @param[in] d_in Input array
* @param[in] d_isValid Array of boolean valid flags with same length as
* \a d_in
* @param[in] numElements Number of elements to compact
* @param[in] stream Stream to execute the kernel on
* @param[in] plan Pointer to plan object for this compact
*
*/
void cudppCompactDispatch(void *d_out,
unsigned int *d_numValidElements,
const void *d_in,
const unsigned int *d_isValid,
size_t numElements,
const hipStream_t stream,
const CUDPPCompactPlan *plan)
{
switch (plan->m_config.datatype)
{
case CUDPP_CHAR:
compactArray<char>((char*)d_out, d_numValidElements,
(const char*)d_in, d_isValid, numElements, stream, plan);
break;
case CUDPP_UCHAR:
compactArray<unsigned char>((unsigned char*)d_out, d_numValidElements,
(const unsigned char*)d_in, d_isValid,
numElements, stream, plan);
break;
case CUDPP_INT:
compactArray<int>((int*)d_out, d_numValidElements,
(const int*)d_in, d_isValid, numElements, stream, plan);
break;
case CUDPP_UINT:
compactArray<unsigned int>((unsigned int*)d_out, d_numValidElements,
(const unsigned int*)d_in, d_isValid,
numElements, stream, plan);
break;
case CUDPP_FLOAT:
compactArray<float>((float*)d_out, d_numValidElements,
(const float*)d_in, d_isValid, numElements, stream, plan);
break;
default:
break;
}
}
#ifdef __cplusplus
}
#endif
/** @} */ // end compact functions
/** @} */ // end cudpp_app
| 537925d30f7e63c7b9e718f065f4128e01394195.cu | // -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 3049 $
// $Date: 2007-02-26 10:42:36 -0800 (Mon, 26 Feb 2007) $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt in
// the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* compact_app.cu
*
* @brief CUDPP application-level compact routines
*/
#include "cudpp_globals.h"
#include "cudpp_util.h"
#include "cudpp.h"
#include "cudpp_plan.h"
#include "cudpp_scan.h"
#include "kernel/compact_kernel.cu"
#include <cutil.h>
#include <cstdlib>
#include <cstdio>
#include <assert.h>
/** \addtogroup cudpp_app
* @{
*/
/** @name Compact Functions
* @{
*/
/** @brief Calculate launch parameters for compactArray().
*
* Calculates the block size and number of blocks from the total
* number of elements and the maximum threads per block. Called by
* compactArray().
*
* The calculation is pretty straightforward - the number of blocks
* is calculated by dividing the number of input elements by the product
* of the number of threads in each CTA and the number of elements each thread
* will process. numThreads and numEltsPerBlock are also simple to
* calculate. Please note that in cases where numElements is not an exact
 * multiple of SCAN_ELTS_PER_THREAD * CTA_SIZE, some threads will do
 * nothing, or one thread will process fewer than
 * SCAN_ELTS_PER_THREAD elements.
*
*
* @param[in] numElements Number of elements to sort
* @param[out] numThreads Number of threads in each block
* @param[out] numBlocks Number of blocks
* @param[out] numEltsPerBlock Number of elements processed per block
*
*/
void calculatCompactLaunchParams(const unsigned int numElements,
unsigned int &numThreads,
unsigned int &numBlocks,
unsigned int &numEltsPerBlock)
{
numBlocks =
max(1, (int)ceil((float)numElements /
((float)SCAN_ELTS_PER_THREAD * CTA_SIZE)));
if (numBlocks > 1)
{
numThreads = CTA_SIZE;
}
else
{
numThreads = (unsigned int)ceil((float)numElements / (float)SCAN_ELTS_PER_THREAD);
}
numEltsPerBlock = numThreads * SCAN_ELTS_PER_THREAD;
}
/** @brief Compact the non-zero elements of an array.
*
* Given an input array \a d_in, compactArray() outputs a compacted version
 * which does not have null (zero) elements. Also outputs the number of non-zero
* elements in the compacted array. Called by ::cudppCompactDispatch().
*
* The algorithm is straightforward, involving two steps (most of the
* complexity is hidden in scan, invoked with cudppScanDispatch() ).
*
* -# scanArray() performs a prefix sum on \a d_isValid to compute output
* indices.
* -# compactData() takes \a d_in and an intermediate array of output indices
* as input and writes the values with valid flags in \a d_isValid into
* \a d_out using the output indices.
*
* @param[out] d_out Array of compacted non-null elements
* @param[out] d_numValidElements Pointer to unsigned int to store number of
* non-null elements
* @param[in] d_in Input array
* @param[out] d_isValid Array of flags, 1 for each non-null element, 0
* for each null element. Same length as \a d_in
* @param[in] numElements Number of elements in input array
* @param[in] stream The stream to execute the kernel on
* @param[in] plan Pointer to the plan object used for this compact
*
*/
template<class T>
void compactArray(T *d_out,
unsigned int *d_numValidElements,
const T *d_in,
const unsigned int *d_isValid,
size_t numElements,
const cudaStream_t stream,
const CUDPPCompactPlan *plan)
{
unsigned int numThreads = 0;
unsigned int numBlocks = 0;
unsigned int numEltsPerBlock = 0;
// Calculate CUDA launch parameters - number of blocks, number of threads
// @todo What is numEltsPerBlock doing here?
calculatCompactLaunchParams(numElements, numThreads, numBlocks, numEltsPerBlock);
// Run prefix sum on isValid array to find the addresses in the compacted
// output array where each non-null element of d_in will go to
cudppScanDispatch((void*)plan->m_d_outputIndices, (void*)d_isValid,
numElements, 1, stream, plan->m_scanPlan);
// For every non-null element in d_in write it to its proper place in the
// d_out. This is indicated by the corresponding element in isValid array
if (plan->m_config.options & CUDPP_OPTION_BACKWARD)
compactData<T, true><<<numBlocks, numThreads, 0, stream>>>(d_out,
d_numValidElements,
plan->m_d_outputIndices,
d_isValid, d_in, numElements);
else
compactData<T, false><<<numBlocks, numThreads, 0, stream>>>(d_out,
d_numValidElements,
plan->m_d_outputIndices,
d_isValid, d_in, numElements);
CUT_CHECK_ERROR("compactArray -- compactData");
}
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Allocate intermediate arrays used by cudppCompact().
*
* In addition to the internal CUDPPScanPlan contained in CUDPPCompactPlan,
* CUDPPCompact also needs a temporary device array of output indices, which
* is allocated by this function.
*
* @param plan Pointer to CUDPPCompactPlan object within which intermediate
* storage is allocated.
*/
void allocCompactStorage(CUDPPCompactPlan *plan)
{
CUDA_SAFE_CALL( cudaMalloc((void**)&plan->m_d_outputIndices, sizeof(unsigned int) * plan->m_numElements) );
}
/** @brief Deallocate intermediate storage used by cudppCompact().
*
* Deallocates the output indices array allocated by allocCompactStorage().
*
* @param plan Pointer to CUDPPCompactPlan object initialized by allocCompactStorage().
*/
void freeCompactStorage(CUDPPCompactPlan *plan)
{
CUDA_SAFE_CALL( cudaFree(plan->m_d_outputIndices));
}
/** @brief Dispatch compactArray for the specified datatype.
*
* A thin wrapper on top of compactArray which calls compactArray() for the data type
* specified in \a config. This is the app-level interface to compact used by
* cudppCompact().
*
* @param[out] d_out Compacted array of non-zero elements
* @param[out] d_numValidElements Pointer to an unsigned int to store the
* number of non-zero elements
* @param[in] d_in Input array
* @param[in] d_isValid Array of boolean valid flags with same length as
* \a d_in
* @param[in] numElements Number of elements to compact
* @param[in] stream Stream to execute the kernel on
* @param[in] plan Pointer to plan object for this compact
*
*/
void cudppCompactDispatch(void *d_out,
unsigned int *d_numValidElements,
const void *d_in,
const unsigned int *d_isValid,
size_t numElements,
const cudaStream_t stream,
const CUDPPCompactPlan *plan)
{
switch (plan->m_config.datatype)
{
case CUDPP_CHAR:
compactArray<char>((char*)d_out, d_numValidElements,
(const char*)d_in, d_isValid, numElements, stream, plan);
break;
case CUDPP_UCHAR:
compactArray<unsigned char>((unsigned char*)d_out, d_numValidElements,
(const unsigned char*)d_in, d_isValid,
numElements, stream, plan);
break;
case CUDPP_INT:
compactArray<int>((int*)d_out, d_numValidElements,
(const int*)d_in, d_isValid, numElements, stream, plan);
break;
case CUDPP_UINT:
compactArray<unsigned int>((unsigned int*)d_out, d_numValidElements,
(const unsigned int*)d_in, d_isValid,
numElements, stream, plan);
break;
case CUDPP_FLOAT:
compactArray<float>((float*)d_out, d_numValidElements,
(const float*)d_in, d_isValid, numElements, stream, plan);
break;
default:
break;
}
}
#ifdef __cplusplus
}
#endif
/** @} */ // end compact functions
/** @} */ // end cudpp_app
|
fbc3f8650566c65f6d136711f66fce7f41ae40ec.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/normalize/normalize_gpu.h" // NOLINT
#include "dali/kernels/normalize/normalize_gpu_impl.cuh" // NOLINT
#include <gtest/gtest.h>
#include <cmath>
#include <initializer_list>
#include <iostream>
#include <random>
#include <utility>
#include "dali/core/cuda_event.h"
#include "dali/kernels/kernel_manager.h"
#include "dali/test/device_test.h"
#include "dali/test/test_tensors.h"
#include "dali/test/tensor_test_utils.h"
namespace dali {
namespace kernels {
template <bool calc_inv_stddev, typename Out, typename In>
void RefNormalize(
const OutTensorCPU<Out> &out,
const InTensorCPU<In> &in, const InTensorCPU<float> &base,
const InTensorCPU<float> &scale,
float epsilon, float global_scale, float shift,
TensorShape<> &data_pos, TensorShape<> &base_pos, TensorShape<> &scale_pos, int dim) {
int db = 0, ds = 0;
int64_t extent = 0;
if (dim < in.dim()) {
db = base.shape[dim] > 1 ? 1 : 0;
ds = scale.shape[dim] > 1 ? 1 : 0;
extent = in.shape[dim];
}
if (dim >= in.dim() - 1) { // handles both last dimension and degenerate case
Out *optr = out(data_pos);
const In *iptr = in(data_pos);
const float *sptr = scale(scale_pos);
const float *bptr = base(base_pos);
for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) {
float mul;
if (calc_inv_stddev) {
float x = sptr[s] * sptr[s] + epsilon;
mul = x ? rsqrt(x) * global_scale : 0;
} else {
mul = sptr[s] * global_scale;
}
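// out = ConvertSat<Out>((in - base) * mul + shift); in calc_inv_stddev mode,
// mul = global_scale / sqrt(scale^2 + epsilon), with 0 used when that denominator is 0.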
optr[i] = ConvertSat<Out>(std::fma(iptr[i] - bptr[b], mul, shift));
}
} else {
for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) {
data_pos[dim] = i;
base_pos[dim] = b;
scale_pos[dim] = s;
RefNormalize<calc_inv_stddev>(out, in, base, scale, epsilon, global_scale, shift,
data_pos, base_pos, scale_pos, dim + 1);
}
}
}
/**
* @brief Reference normalization of a single tensor
*
* If base/scale has an extent of 1 in any given dimension, it's broadcast along this axis.
*
* @param calc_inv_stddev if true, `scale` is assumed to contain standard deviation, which
* is subsequently regularized using given epsilon value
*/
template <typename Out, typename In>
void RefNormalize(
const OutTensorCPU<Out> &out,
const InTensorCPU<In> &in, const InTensorCPU<float> &base,
const InTensorCPU<float> &scale,
float global_scale, float shift,
bool calc_inv_stddev, float epsilon) {
TensorShape<> data_pos, base_pos, scale_pos;
int D = in.dim();
data_pos.resize(D);
base_pos.resize(D);
scale_pos.resize(D);
if (calc_inv_stddev) {
RefNormalize<true>(out, in, base, scale, epsilon, global_scale, shift,
data_pos, base_pos, scale_pos, 0);
} else {
RefNormalize<false>(out, in, base, scale, epsilon, global_scale, shift,
data_pos, base_pos, scale_pos, 0);
}
}
/**
* @brief Reference implementation of normalization
*
* Goes over all input samples and normalizes them using given base and scale tensor lists.
* If base/scale TL has 1 element, it is reused for normalization of all samples.
* If base/scale has an extent of 1 in any given dimension, it's broadcast along this axis.
*
* @param calc_inv_stddev if true, `scale` is assumed to contain standard deviation, which
* is subsequently regularized using given epsilon value
*/
template <typename Out, typename In>
void RefNormalize(
const OutListCPU<Out> &out, const TensorListView<StorageCPU, In> &in,
const InListCPU<float> &base, const InListCPU<float> &scale,
float global_scale, float shift,
bool calc_inv_stddev = false, float epsilon = 0) {
assert(out.shape == in.shape);
int N = in.num_samples();
int db = base.num_samples() > 1;
int ds = scale.num_samples() > 1;
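// db/ds are 0 when a single base/scale tensor is broadcast across the whole batch,
// 1 when there is one per sample.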
for (int i = 0, b = 0, s = 0; i < N; i++, b += db, s += ds) {
RefNormalize<Out, In>(out[i], in[i], base[b], scale[s],
global_scale, shift, calc_inv_stddev, epsilon);
}
}
template <typename RNG>
TensorListShape<>
RandomDataShape(int num_samples, int ndim, int64_t max_volume,
uint64_t reduced_axes, bool reduce_batch, RNG &rng) {
assert(max_volume >= 1);
TensorListShape<> sh;
sh.resize(num_samples, ndim);
int64_t extent_range = ::ceil(pow(max_volume, 1.0 / ndim));
std::uniform_int_distribution<int64_t> shape_dist(1, extent_range);
for (int i = 0; i < num_samples; i++) {
auto sample_shape = sh.tensor_shape_span(i);
do {
for (int d = 0; d < ndim; d++) {
// when reducing samples in the batch, the non-reduced extents must be uniform
// across all samples
sample_shape[d] = reduced_axes & (1_u64 << d) || !reduce_batch || i == 0
? shape_dist(rng)
: sh.tensor_shape_span(0)[d];
}
} while (volume(sample_shape) > max_volume);
}
return sh;
}
/**
* @brief Creates a tensor list which contains a repeated scalar
*
* If ndim > 0, then the tensor list will contain 1x1x...x1 tensors with given dimensionality
*/
template <typename T>
TensorListView<StorageCPU, T> ScalarTLV(T &scalar, int num_samples, int ndim = 0) {
TensorListView<StorageCPU, T> tlv;
TensorShape<> ts;
ts.resize(ndim);
for (int d = 0; d < ndim; d++)
ts[d] = 1;
tlv.shape = uniform_list_shape(num_samples, ts);
tlv.data.resize(num_samples);
for (int i = 0 ; i < num_samples; i++)
tlv.data[i] = &scalar;
return tlv;
}
template <typename Params>
class NormalizeImplGPUTest;
template <typename Out, typename In>
class NormalizeImplGPUTest<std::pair<Out, In>> : public ::testing::Test {
public:
// this will test both the top-level pImpl class and the internal implementation class
using Kernel = std::conditional_t<std::is_same<Out, In>::value,
NormalizeGPU<Out, In>,
normalize_impl::NormalizeImplGPU<Out, In, float, float>
>;
void Init(int num_samples, int ndim, int64_t max_sample_volume,
std::initializer_list<int> reduced_axes, bool reduce_batch,
bool scalar_base, bool scalar_scale, bool scale_is_stddev) {
Init(num_samples, ndim, max_sample_volume,
{ reduced_axes.begin(), reduced_axes.end() }, reduce_batch,
scalar_base, scalar_scale, scale_is_stddev);
}
void Init(int num_samples, int ndim, int64_t max_sample_volume,
span<const int> reduced_axes, bool reduce_batch,
bool scalar_base, bool scalar_scale, bool scale_is_stddev) {
In lo = 0, hi = 100;
use_scalar_base_ = scalar_base;
use_scalar_scale_ = scalar_scale;
axis_mask_ = to_bit_mask(reduced_axes);
reduced_axes_ = { begin(reduced_axes), end(reduced_axes) };
reduce_batch_ = reduce_batch;
scale_is_stddev_ = scale_is_stddev;
data_shape_ = RandomDataShape(num_samples, ndim, max_sample_volume,
axis_mask_, reduce_batch_, rng_);
in_.reshape(data_shape_);
UniformRandomFill(in_.cpu(), rng_, lo, hi);
if (!scalar_base || !scalar_scale) {
int param_samples = reduce_batch ? 1 : num_samples;
param_shape_.resize(param_samples, ndim);
for (int i = 0; i < param_samples; i++) {
for (int d = 0; d < ndim; d++) {
bool reduced = axis_mask_ & (1_u64 << d);
param_shape_.tensor_shape_span(i)[d] = reduced ? 1 : data_shape_.tensor_shape_span(i)[d];
}
}
} else {
param_shape_.resize(1, 0);
}
auto scale_dist = uniform_distribution(0.1f, 10.0f);
if (scalar_scale) {
scalar_scale_ = scale_dist(rng_);
} else {
scale_.reshape(param_shape_);
UniformRandomFill(scale_.cpu(), rng_, scale_dist.a(), scale_dist.b());
}
if (scalar_base) {
scalar_base_ = uniform_distribution(lo, hi)(rng_);
} else {
base_.reshape(param_shape_);
UniformRandomFill(base_.cpu(), rng_, lo, hi);
}
if (std::is_integral<Out>::value) {
global_scale_ = std::exp2f(7 * sizeof(Out)) / hi; // scale to half range
if (std::is_unsigned<Out>::value)
shift_ = global_scale_; // shift half range up
}
}
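  // Runs Setup + Launch three times on the same kernel instance and compares the GPU result
  // against the CPU reference (RefNormalize). When scale holds a regularized standard
  // deviation and the output is floating point, a relative-epsilon check is used; otherwise
  // the output must match the reference within 4 ULPs.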
void RunTest() {
kmgr_.Resize<Kernel>(1);
KernelContext ctx;
ctx.gpu.stream = 0;
for (int iter = 0; iter < 3; iter++) {
auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_,
use_scalar_base_, use_scalar_scale_, scale_is_stddev_);
ASSERT_EQ(req.output_shapes.size(), 1u);
ASSERT_EQ(req.output_shapes[0], data_shape_);
out_.reshape(data_shape_);
ref_.reshape(data_shape_);
Launch(ctx);
int param_samples = param_shape_.num_samples();
auto ref_base = use_scalar_base_
? ScalarTLV(scalar_base_, param_samples, data_shape_.sample_dim())
: base_.cpu();
auto ref_scale = use_scalar_scale_
? ScalarTLV(scalar_scale_, param_samples, data_shape_.sample_dim())
: scale_.cpu();
RefNormalize(ref_.cpu(), in_.cpu(), ref_base, ref_scale,
global_scale_, shift_, scale_is_stddev_, epsilon_);
if (scale_is_stddev_ && !std::is_integral<Out>::value)
Check(out_.cpu(), ref_.cpu(), EqualEpsRel(1e-6, 1e-6));
else
Check(out_.cpu(), ref_.cpu(), EqualUlp(4));
}
}
void RunPerf() {
kmgr_.Resize<Kernel>(1);
KernelContext ctx;
ctx.gpu.stream = 0;
auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_,
use_scalar_base_, use_scalar_scale_, scale_is_stddev_);
ASSERT_EQ(req.output_shapes.size(), 1u);
ASSERT_EQ(req.output_shapes[0], data_shape_);
out_.reshape(data_shape_);
CUDAEvent start = CUDAEvent::CreateWithFlags(0);
CUDAEvent end = CUDAEvent::CreateWithFlags(0);
auto out_gpu = out_.gpu();
CUDA_CALL(
hipMemsetAsync(out_gpu.data[0], 0, sizeof(Out) * out_gpu.num_elements(), ctx.gpu.stream));
Launch(ctx);
CUDA_CALL(hipEventRecord(start, ctx.gpu.stream));
Launch(ctx);
CUDA_CALL(hipEventRecord(end, ctx.gpu.stream));
float time;
CUDA_CALL(hipDeviceSynchronize());
CUDA_CALL(hipEventElapsedTime(&time, start, end));
time *= 1e+6f; // convert to nanoseconds
int64_t out_size = data_shape_.num_elements() * sizeof(Out);
int64_t in_size = data_shape_.num_elements() * sizeof(In);
int64_t base_size = scalar_base_ ? 0 : param_shape_.num_elements() * sizeof(float);
int64_t scale_size = scalar_scale_ ? 0 : param_shape_.num_elements() * sizeof(float);
int64_t data_size = out_size + in_size + base_size + scale_size;
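    // data_size is in bytes and time in nanoseconds, so the ratio is conveniently in GB/s.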
std::cerr << "Throughput: " << data_size / time << " GB/s\n";
}
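  // Dispatches to the kernel overload matching the parameter kinds: base and scale can each
  // be passed either as a scalar or as a per-sample tensor list.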
void Launch(KernelContext &ctx) {
if (use_scalar_base_) {
if (use_scalar_scale_) {
kmgr_.Run<Kernel>(0, ctx, out_.gpu(), in_.gpu(), scalar_base_, scalar_scale_,
global_scale_, shift_, epsilon_);
} else {
kmgr_.Run<Kernel>(0, ctx, out_.gpu(), in_.gpu(), scalar_base_, scale_.gpu(),
global_scale_, shift_, epsilon_);
}
} else {
if (use_scalar_scale_) {
kmgr_.Run<Kernel>(0, ctx, out_.gpu(), in_.gpu(), base_.gpu(), scalar_scale_,
global_scale_, shift_, epsilon_);
} else {
kmgr_.Run<Kernel>(0, ctx, out_.gpu(), in_.gpu(), base_.gpu(), scale_.gpu(),
global_scale_, shift_, epsilon_);
}
}
}
protected:
KernelManager kmgr_;
TestTensorList<In> in_;
TestTensorList<Out> out_;
TestTensorList<float> ref_;
TestTensorList<float> base_, scale_;
TensorListShape<> data_shape_, param_shape_;
SmallVector<int, 6> reduced_axes_;
uint64_t axis_mask_;
bool reduce_batch_ = false;
bool use_scalar_base_ = false;
bool use_scalar_scale_ = false;
bool scale_is_stddev_ = false;
float scalar_base_ = 0, scalar_scale_ = 1;
float global_scale_ = 1.25f, shift_ = 0.1f, epsilon_ = 0.2f;
std::mt19937_64 rng_;
};
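// Each pair below is (output type, input type); the integral output pair additionally
// exercises saturating conversion and the output range adjustment performed in Init().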
using NormalizeTestTypes = ::testing::Types<
std::pair<int16_t, uint8_t>,
std::pair<float, uint16_t>,
std::pair<float, float>>;
TYPED_TEST_SUITE(NormalizeImplGPUTest, NormalizeTestTypes);
TYPED_TEST(NormalizeImplGPUTest, NonScalar) {
this->Init(10, 4, 10000, { 1, 3 }, false, false, false, false);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, false, false, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarBase) {
this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarScale) {
this->Init(10, 4, 10000, { 1, 3 }, false, false, true, false);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, false, true, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarParams) {
this->Init(10, 4, 10000, {}, false, true, true, false);
this->RunTest();
this->Init(10, 3, 10000, {}, true, true, true, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, NonScalar_InvStdDev) {
this->Init(10, 4, 10000, { 1, 3 }, false, false, false, true);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, false, false, true);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarBase_InvStdDev) {
this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarScale_InvStdDev) {
this->Init(10, 4, 10000, { 1, 3 }, false, false, true, true);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, false, true, true);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarParams_InvStdDev) {
this->Init(10, 4, 10000, {}, false, true, true, true);
this->RunTest();
this->Init(10, 3, 10000, {}, true, true, true, true);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D) {
this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, false);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01) {
this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, false);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12) {
this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, false);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams) {
this->Init(64, 3, 1<<20, {}, false, true, true, false);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D_InvStdDev) {
this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, true);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01_InvStdDev) {
this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, true);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12_InvStdDev) {
this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, true);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams_InvStdDev) {
this->Init(64, 3, 1<<20, {}, false, true, true, true);
this->RunPerf();
}
} // namespace kernels
} // namespace dali
| fbc3f8650566c65f6d136711f66fce7f41ae40ec.cu | // Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/normalize/normalize_gpu.h" // NOLINT
#include "dali/kernels/normalize/normalize_gpu_impl.cuh" // NOLINT
#include <gtest/gtest.h>
#include <cmath>
#include <initializer_list>
#include <iostream>
#include <random>
#include <utility>
#include "dali/core/cuda_event.h"
#include "dali/kernels/kernel_manager.h"
#include "dali/test/device_test.h"
#include "dali/test/test_tensors.h"
#include "dali/test/tensor_test_utils.h"
namespace dali {
namespace kernels {
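/**
 * @brief Recursive reference implementation of normalization for a single tensor.
 *
 * Walks the tensor dimension by dimension; the innermost loop computes
 * out = (in - base) * scale * global_scale + shift, with scale replaced by
 * 1 / sqrt(stddev^2 + epsilon) when calc_inv_stddev is true. The base/scale positions
 * advance only along axes where their extent is greater than 1 (broadcasting).
 */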
template <bool calc_inv_stddev, typename Out, typename In>
void RefNormalize(
const OutTensorCPU<Out> &out,
const InTensorCPU<In> &in, const InTensorCPU<float> &base,
const InTensorCPU<float> &scale,
float epsilon, float global_scale, float shift,
TensorShape<> &data_pos, TensorShape<> &base_pos, TensorShape<> &scale_pos, int dim) {
int db = 0, ds = 0;
int64_t extent = 0;
if (dim < in.dim()) {
db = base.shape[dim] > 1 ? 1 : 0;
ds = scale.shape[dim] > 1 ? 1 : 0;
extent = in.shape[dim];
}
if (dim >= in.dim() - 1) { // handles both last dimension and degenerate case
Out *optr = out(data_pos);
const In *iptr = in(data_pos);
const float *sptr = scale(scale_pos);
const float *bptr = base(base_pos);
for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) {
float mul;
if (calc_inv_stddev) {
float x = sptr[s] * sptr[s] + epsilon;
mul = x ? rsqrt(x) * global_scale : 0;
} else {
mul = sptr[s] * global_scale;
}
optr[i] = ConvertSat<Out>(std::fma(iptr[i] - bptr[b], mul, shift));
}
} else {
for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) {
data_pos[dim] = i;
base_pos[dim] = b;
scale_pos[dim] = s;
RefNormalize<calc_inv_stddev>(out, in, base, scale, epsilon, global_scale, shift,
data_pos, base_pos, scale_pos, dim + 1);
}
}
}
/**
* @brief Reference normalization of a single tensor
*
* If base/scale has an extent of 1 in any given dimension, it's broadcast along this axis.
*
* @param calc_inv_stddev if true, `scale` is assumed to contain standard deviation, which
* is subsequently regularized using given epsilon value
*/
template <typename Out, typename In>
void RefNormalize(
const OutTensorCPU<Out> &out,
const InTensorCPU<In> &in, const InTensorCPU<float> &base,
const InTensorCPU<float> &scale,
float global_scale, float shift,
bool calc_inv_stddev, float epsilon) {
TensorShape<> data_pos, base_pos, scale_pos;
int D = in.dim();
data_pos.resize(D);
base_pos.resize(D);
scale_pos.resize(D);
if (calc_inv_stddev) {
RefNormalize<true>(out, in, base, scale, epsilon, global_scale, shift,
data_pos, base_pos, scale_pos, 0);
} else {
RefNormalize<false>(out, in, base, scale, epsilon, global_scale, shift,
data_pos, base_pos, scale_pos, 0);
}
}
/**
* @brief Reference implementation of normalization
*
* Goes over all input samples and normalizes them using given base and scale tensor lists.
* If base/scale TL has 1 element, it is reused for normalization of all samples.
* If base/scale has an extent of 1 in any given dimension, it's broadcast along this axis.
*
* @param calc_inv_stddev if true, `scale` is assumed to contain standard deviation, which
* is subsequently regularized using given epsilon value
*/
template <typename Out, typename In>
void RefNormalize(
const OutListCPU<Out> &out, const TensorListView<StorageCPU, In> &in,
const InListCPU<float> &base, const InListCPU<float> &scale,
float global_scale, float shift,
bool calc_inv_stddev = false, float epsilon = 0) {
assert(out.shape == in.shape);
int N = in.num_samples();
int db = base.num_samples() > 1;
int ds = scale.num_samples() > 1;
for (int i = 0, b = 0, s = 0; i < N; i++, b += db, s += ds) {
RefNormalize<Out, In>(out[i], in[i], base[b], scale[s],
global_scale, shift, calc_inv_stddev, epsilon);
}
}
template <typename RNG>
TensorListShape<>
RandomDataShape(int num_samples, int ndim, int64_t max_volume,
uint64_t reduced_axes, bool reduce_batch, RNG &rng) {
assert(max_volume >= 1);
TensorListShape<> sh;
sh.resize(num_samples, ndim);
int64_t extent_range = std::ceil(pow(max_volume, 1.0 / ndim));
std::uniform_int_distribution<int64_t> shape_dist(1, extent_range);
for (int i = 0; i < num_samples; i++) {
auto sample_shape = sh.tensor_shape_span(i);
do {
for (int d = 0; d < ndim; d++) {
// when reducing samples in the batch, the non-reduced extents must be uniform
// across all samples
sample_shape[d] = reduced_axes & (1_u64 << d) || !reduce_batch || i == 0
? shape_dist(rng)
: sh.tensor_shape_span(0)[d];
}
} while (volume(sample_shape) > max_volume);
}
return sh;
}
/**
* @brief Creates a tensor list which contains a repeated scalar
*
* If ndim > 0, then the tensor list will contain 1x1x...x1 tensors with given dimensionality
*/
template <typename T>
TensorListView<StorageCPU, T> ScalarTLV(T &scalar, int num_samples, int ndim = 0) {
TensorListView<StorageCPU, T> tlv;
TensorShape<> ts;
ts.resize(ndim);
for (int d = 0; d < ndim; d++)
ts[d] = 1;
tlv.shape = uniform_list_shape(num_samples, ts);
tlv.data.resize(num_samples);
for (int i = 0 ; i < num_samples; i++)
tlv.data[i] = &scalar;
return tlv;
}
template <typename Params>
class NormalizeImplGPUTest;
template <typename Out, typename In>
class NormalizeImplGPUTest<std::pair<Out, In>> : public ::testing::Test {
public:
// this will test both the top-level pImpl class and the internal implementation class
using Kernel = std::conditional_t<std::is_same<Out, In>::value,
NormalizeGPU<Out, In>,
normalize_impl::NormalizeImplGPU<Out, In, float, float>
>;
void Init(int num_samples, int ndim, int64_t max_sample_volume,
std::initializer_list<int> reduced_axes, bool reduce_batch,
bool scalar_base, bool scalar_scale, bool scale_is_stddev) {
Init(num_samples, ndim, max_sample_volume,
{ reduced_axes.begin(), reduced_axes.end() }, reduce_batch,
scalar_base, scalar_scale, scale_is_stddev);
}
void Init(int num_samples, int ndim, int64_t max_sample_volume,
span<const int> reduced_axes, bool reduce_batch,
bool scalar_base, bool scalar_scale, bool scale_is_stddev) {
In lo = 0, hi = 100;
use_scalar_base_ = scalar_base;
use_scalar_scale_ = scalar_scale;
axis_mask_ = to_bit_mask(reduced_axes);
reduced_axes_ = { begin(reduced_axes), end(reduced_axes) };
reduce_batch_ = reduce_batch;
scale_is_stddev_ = scale_is_stddev;
data_shape_ = RandomDataShape(num_samples, ndim, max_sample_volume,
axis_mask_, reduce_batch_, rng_);
in_.reshape(data_shape_);
UniformRandomFill(in_.cpu(), rng_, lo, hi);
if (!scalar_base || !scalar_scale) {
int param_samples = reduce_batch ? 1 : num_samples;
param_shape_.resize(param_samples, ndim);
for (int i = 0; i < param_samples; i++) {
for (int d = 0; d < ndim; d++) {
bool reduced = axis_mask_ & (1_u64 << d);
param_shape_.tensor_shape_span(i)[d] = reduced ? 1 : data_shape_.tensor_shape_span(i)[d];
}
}
} else {
param_shape_.resize(1, 0);
}
auto scale_dist = uniform_distribution(0.1f, 10.0f);
if (scalar_scale) {
scalar_scale_ = scale_dist(rng_);
} else {
scale_.reshape(param_shape_);
UniformRandomFill(scale_.cpu(), rng_, scale_dist.a(), scale_dist.b());
}
if (scalar_base) {
scalar_base_ = uniform_distribution(lo, hi)(rng_);
} else {
base_.reshape(param_shape_);
UniformRandomFill(base_.cpu(), rng_, lo, hi);
}
if (std::is_integral<Out>::value) {
global_scale_ = std::exp2f(7 * sizeof(Out)) / hi; // scale to half range
if (std::is_unsigned<Out>::value)
shift_ = global_scale_; // shift half range up
}
}
void RunTest() {
kmgr_.Resize<Kernel>(1);
KernelContext ctx;
ctx.gpu.stream = 0;
for (int iter = 0; iter < 3; iter++) {
auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_,
use_scalar_base_, use_scalar_scale_, scale_is_stddev_);
ASSERT_EQ(req.output_shapes.size(), 1u);
ASSERT_EQ(req.output_shapes[0], data_shape_);
out_.reshape(data_shape_);
ref_.reshape(data_shape_);
Launch(ctx);
int param_samples = param_shape_.num_samples();
auto ref_base = use_scalar_base_
? ScalarTLV(scalar_base_, param_samples, data_shape_.sample_dim())
: base_.cpu();
auto ref_scale = use_scalar_scale_
? ScalarTLV(scalar_scale_, param_samples, data_shape_.sample_dim())
: scale_.cpu();
RefNormalize(ref_.cpu(), in_.cpu(), ref_base, ref_scale,
global_scale_, shift_, scale_is_stddev_, epsilon_);
if (scale_is_stddev_ && !std::is_integral<Out>::value)
Check(out_.cpu(), ref_.cpu(), EqualEpsRel(1e-6, 1e-6));
else
Check(out_.cpu(), ref_.cpu(), EqualUlp(4));
}
}
void RunPerf() {
kmgr_.Resize<Kernel>(1);
KernelContext ctx;
ctx.gpu.stream = 0;
auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_,
use_scalar_base_, use_scalar_scale_, scale_is_stddev_);
ASSERT_EQ(req.output_shapes.size(), 1u);
ASSERT_EQ(req.output_shapes[0], data_shape_);
out_.reshape(data_shape_);
CUDAEvent start = CUDAEvent::CreateWithFlags(0);
CUDAEvent end = CUDAEvent::CreateWithFlags(0);
auto out_gpu = out_.gpu();
CUDA_CALL(
cudaMemsetAsync(out_gpu.data[0], 0, sizeof(Out) * out_gpu.num_elements(), ctx.gpu.stream));
Launch(ctx);
CUDA_CALL(cudaEventRecord(start, ctx.gpu.stream));
Launch(ctx);
CUDA_CALL(cudaEventRecord(end, ctx.gpu.stream));
float time;
CUDA_CALL(cudaDeviceSynchronize());
CUDA_CALL(cudaEventElapsedTime(&time, start, end));
time *= 1e+6f; // convert to nanoseconds
int64_t out_size = data_shape_.num_elements() * sizeof(Out);
int64_t in_size = data_shape_.num_elements() * sizeof(In);
int64_t base_size = scalar_base_ ? 0 : param_shape_.num_elements() * sizeof(float);
int64_t scale_size = scalar_scale_ ? 0 : param_shape_.num_elements() * sizeof(float);
int64_t data_size = out_size + in_size + base_size + scale_size;
std::cerr << "Throughput: " << data_size / time << " GB/s\n";
}
void Launch(KernelContext &ctx) {
if (use_scalar_base_) {
if (use_scalar_scale_) {
kmgr_.Run<Kernel>(0, ctx, out_.gpu(), in_.gpu(), scalar_base_, scalar_scale_,
global_scale_, shift_, epsilon_);
} else {
kmgr_.Run<Kernel>(0, ctx, out_.gpu(), in_.gpu(), scalar_base_, scale_.gpu(),
global_scale_, shift_, epsilon_);
}
} else {
if (use_scalar_scale_) {
kmgr_.Run<Kernel>(0, ctx, out_.gpu(), in_.gpu(), base_.gpu(), scalar_scale_,
global_scale_, shift_, epsilon_);
} else {
kmgr_.Run<Kernel>(0, ctx, out_.gpu(), in_.gpu(), base_.gpu(), scale_.gpu(),
global_scale_, shift_, epsilon_);
}
}
}
protected:
KernelManager kmgr_;
TestTensorList<In> in_;
TestTensorList<Out> out_;
TestTensorList<float> ref_;
TestTensorList<float> base_, scale_;
TensorListShape<> data_shape_, param_shape_;
SmallVector<int, 6> reduced_axes_;
uint64_t axis_mask_;
bool reduce_batch_ = false;
bool use_scalar_base_ = false;
bool use_scalar_scale_ = false;
bool scale_is_stddev_ = false;
float scalar_base_ = 0, scalar_scale_ = 1;
float global_scale_ = 1.25f, shift_ = 0.1f, epsilon_ = 0.2f;
std::mt19937_64 rng_;
};
using NormalizeTestTypes = ::testing::Types<
std::pair<int16_t, uint8_t>,
std::pair<float, uint16_t>,
std::pair<float, float>>;
TYPED_TEST_SUITE(NormalizeImplGPUTest, NormalizeTestTypes);
TYPED_TEST(NormalizeImplGPUTest, NonScalar) {
this->Init(10, 4, 10000, { 1, 3 }, false, false, false, false);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, false, false, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarBase) {
this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarScale) {
this->Init(10, 4, 10000, { 1, 3 }, false, false, true, false);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, false, true, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarParams) {
this->Init(10, 4, 10000, {}, false, true, true, false);
this->RunTest();
this->Init(10, 3, 10000, {}, true, true, true, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, NonScalar_InvStdDev) {
this->Init(10, 4, 10000, { 1, 3 }, false, false, false, true);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, false, false, true);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarBase_InvStdDev) {
this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarScale_InvStdDev) {
this->Init(10, 4, 10000, { 1, 3 }, false, false, true, true);
this->RunTest();
this->Init(10, 3, 10000, { 0, 2 }, true, false, true, true);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, ScalarParams_InvStdDev) {
this->Init(10, 4, 10000, {}, false, true, true, true);
this->RunTest();
this->Init(10, 3, 10000, {}, true, true, true, true);
this->RunTest();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D) {
this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, false);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01) {
this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, false);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12) {
this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, false);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams) {
this->Init(64, 3, 1<<20, {}, false, true, true, false);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D_InvStdDev) {
this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, true);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01_InvStdDev) {
this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, true);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12_InvStdDev) {
this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, true);
this->RunPerf();
}
TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams_InvStdDev) {
this->Init(64, 3, 1<<20, {}, false, true, true, true);
this->RunPerf();
}
} // namespace kernels
} // namespace dali
|
5c0e578bb3035bb2e661bc5635363ab9fb05c7c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softmax_op.h"
#include "caffe2/operators/softmax_with_loss_op.h"
#include "caffe2/operators/spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float weight = weights ? weights[i] : 1.0;
float sum = 0.0;
float total_prob = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
int idx = i * D + j;
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
total_prob += labeldata[idx];
sum += -logf(fmaxf(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
float tot = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
if (threadIdx.x == 0) {
Ydata[i] = tot;
// Sanity check
CUDA_KERNEL_ASSERT(fabsf(1.0 - total_prob_sum) < 1e-5f);
}
__syncthreads();
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
} else {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D];
}
}
}
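// Computes an independent softmax over the D channels for every (image, y, x) location
// of an NCHW tensor, subtracting the per-location maximum for numerical stability.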
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = fmaxf(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = expf(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -logf(fmaxf(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
      // Ignore-label: set all gradients at this position to zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(fmaxf(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
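// Host-side helper computing a row-wise softmax (or log-softmax) of an N x D matrix:
// subtract the per-row maximum, exponentiate, accumulate the per-row sums into `scales`
// via a GEMV with a vector of ones, then normalize (or emit logits - max - log(sum)).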
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->CopySameDevice<float>(size, logits, probs);
  // Subtract the per-row max (broadcast across columns via sum_multiplier)
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
hipLaunchKernelGGL(( SoftmaxNormalizeKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, probs, scales, probs);
} else {
hipLaunchKernelGGL(( SoftmaxNormalizeLogsKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, logits, rowmax, scales, probs);
}
}
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
avg_loss->Resize(vector<TIndex>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->template mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
hipLaunchKernelGGL(( LabelCrossEntropyKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->template mutable_data<float>(), &context_);
} else {
hipLaunchKernelGGL(( ProbCrossEntropyKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(hipMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Sum of all losses
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
  // Average the loss over the total weight (the batch size when no weights are given)
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->template mutable_data<float>();
// Softmax for each x,y location
hipLaunchKernelGGL(( SpatialSoftmaxKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<TIndex>());
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel),
dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
CUDA_CHECK(hipMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.CopySameDevice<float>(
P.size(), P.data<float>(), dX->template mutable_data<float>());
}
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernelWeighted),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>(),
weights);
}
} else {
hipLaunchKernelGGL(( ProbCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<float>(),
dX->template mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(hipMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
// Spatial mode, compute softmax for each x, y location
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->template mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
  // Copy softmax probabilities into dX. Every neuron except the one corresponding
  // to the correct label gets a gradient equal to its probability under softmax.
context_.CopySameDevice<float>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
CUDA_CHECK(hipMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
auto* P_data = P->mutable_data<float>();
if (N == 0) {
return true;
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P_data,
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
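// For each row it computes dX = Y * (dY - dot(dY, Y)), i.e. the softmax Jacobian applied
// to the incoming gradient, using a shared-memory reduction for the dot product.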
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
auto* dX_data = dX->mutable_data<float>();
if (N == 0) {
return true;
}
hipLaunchKernelGGL(( softmax_gradient_kernel),
dim3(N),
dim3(SOFTMAX_NUM_THREADS),
0,
context_.cuda_stream(), D, Y.data<float>(), dY.data<float>(), dX_data);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
| 5c0e578bb3035bb2e661bc5635363ab9fb05c7c2.cu | #include <cfloat>
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softmax_op.h"
#include "caffe2/operators/softmax_with_loss_op.h"
#include "caffe2/operators/spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
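// Per-example loss for hard integer labels: Ydata[i] = -weights[i] * logPdata[i][label[i]],
// where logPdata is expected to already contain log-probabilities.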
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
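// Cross entropy for soft (probability) labels: each block handles one example, reducing
// sum_j(-label[i][j] * log(max(P[i][j], FLT_MIN))) * weight[i] across its threads and
// asserting that the label distribution sums to approximately 1.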
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float weight = weights ? weights[i] : 1.0;
float sum = 0.0;
float total_prob = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
int idx = i * D + j;
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
total_prob += labeldata[idx];
sum += -logf(fmaxf(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
float tot = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
if (threadIdx.x == 0) {
Ydata[i] = tot;
// Sanity check
CUDA_KERNEL_ASSERT(fabsf(1.0 - total_prob_sum) < 1e-5f);
}
__syncthreads();
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
} else {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D];
}
}
}
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = fmaxf(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = expf(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
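// Produces one loss value and one weight per (image, y, x) location; positions labeled
// DONTCARE contribute zero loss and zero weight to the subsequent reductions.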
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -logf(fmaxf(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
      // Ignore-label: set all gradients at this position to zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(fmaxf(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->CopySameDevice<float>(size, logits, probs);
  // Subtract the per-row max (broadcast across columns via sum_multiplier)
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
SoftmaxNormalizeKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, probs, scales, probs);
} else {
SoftmaxNormalizeLogsKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, logits, rowmax, scales, probs);
}
}
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
avg_loss->Resize(vector<TIndex>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->template mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
LabelCrossEntropyKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->template mutable_data<float>(), &context_);
} else {
ProbCrossEntropyKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Sum of all losses
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
  // Average the loss over the total weight (the batch size when no weights are given)
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->template mutable_data<float>();
// Softmax for each x,y location
SpatialSoftmaxKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<TIndex>());
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialCrossEntropyLossKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.CopySameDevice<float>(
P.size(), P.data<float>(), dX->template mutable_data<float>());
}
LabelCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
LabelCrossEntropyGradientKernelWeighted<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>(),
weights);
}
} else {
ProbCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<float>(),
dX->template mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
// Spatial mode, compute softmax for each x, y location
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->template mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
  // Copy softmax probabilities into dX. Every neuron except the one corresponding
  // to the correct label gets a gradient equal to its probability under softmax.
context_.CopySameDevice<float>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialSoftmaxLossGradientKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
CUDA_CHECK(cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
auto* P_data = P->mutable_data<float>();
if (N == 0) {
return true;
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P_data,
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
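// For reference, the update implemented below follows from the softmax
// Jacobian:
//   dX_i = Y_i * (dY_i - sum_j Y_j * dY_j)
// The two-level reduction computes the inner product sum_j Y_j * dY_j once
// per row; the launch further down uses one block per row (grid = N) with
// SOFTMAX_NUM_THREADS threads striding over the D columns.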
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
auto* dX_data = dX->mutable_data<float>();
if (N == 0) {
return true;
}
softmax_gradient_kernel<<<
N,
SOFTMAX_NUM_THREADS,
0,
context_.cuda_stream()>>>(D, Y.data<float>(), dY.data<float>(), dX_data);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
0b981d5dc22c5d82161d759fbee8126c157174c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<iostream>
#include <unistd.h>
using namespace std;
const int numElems =2;
__global__ void dataKernel( double* data, int nsteps){
//this adds a value to a variable stored in global memory
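//Pacing note: each iteration bumps data[thid] by 0.1 and then busy-waits
//until roughly 5000 device clock cycles have passed (the ternary guards
//against 32-bit counter wrap-around), giving the monitor kernel on the
//other stream a window to sample intermediate values from global memory.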
int thid = threadIdx.x;
//data[thid] = 0;
int i = 0;
bool wait = 1;
clock_t start = clock64();
clock_t now;
while(i < nsteps){
data[thid] = data[thid]+.1;
clock_t start = clock64();
i = i+1;
while(wait == 1){
now = clock();
clock_t cycles = now > start ? now - start : now + (0xffffffff - start);
if(cycles > 5000)
wait = 0;
}
wait = 1;
__syncthreads();
}
}
__global__ void monitorKernel(double * write_2_ptr, double * read_in_ptr){
*write_2_ptr = *read_in_ptr;
}
int main()
{
cout <<"Running CUDA init" << endl;
double *dArray;
int i = 0;
//pointer of helper function return
double *h_data;
//double monitor_data[numElems];
double *monitor_data_dev;
hipMalloc((void**)&dArray, sizeof(double)*numElems);
hipMemset(dArray, 0, numElems*sizeof(double));
hipHostMalloc((void**)&h_data, sizeof(double)*numElems);
hipStream_t stream1;
hipStreamCreateWithFlags(&stream1, hipStreamNonBlocking);
hipMalloc((void**)&monitor_data_dev, sizeof(double)*numElems);
cout <<"Launching Helper Kernel" << endl;
//*help_rdy = help_fcn(*help_input, out);
hipLaunchKernelGGL(( dataKernel), dim3(1),dim3(numElems), 0, 0, dArray, 1000);
usleep(400000); // sleep(.4) truncates its argument to 0; usleep restores the intended ~0.4 s pause
cout <<"Launching Monitor Kernel" << endl;
//hipStreamSynchronize(stream1);
hipLaunchKernelGGL(( monitorKernel), dim3(1), dim3(1),0, stream1, monitor_data_dev, dArray);
cout <<"Launching Async Mem Cpy" << endl;
hipMemcpyAsync(h_data, monitor_data_dev, numElems*sizeof(double), hipMemcpyDeviceToHost, stream1);
hipStreamSynchronize(stream1);
for(i = 0; i < numElems; i++)
cout << "Value copied over: " << h_data[i] << endl;
usleep(300000); // sleep(.3) truncates its argument to 0; usleep restores the intended ~0.3 s pause
cout <<"Launching Monitor Kernel" << endl;
//hipStreamSynchronize(stream1);
hipLaunchKernelGGL(( monitorKernel), dim3(1), dim3(1),0, stream1, monitor_data_dev, dArray);
cout <<"Launching Async Mem Cpy" << endl;
hipMemcpyAsync(h_data, monitor_data_dev, numElems*sizeof(double), hipMemcpyDeviceToHost, stream1);
hipStreamSynchronize(stream1);
for(i = 0; i < numElems; i++)
cout << "Value copied over: " << h_data[i] << endl;
hipMemcpy(h_data, dArray, sizeof(double)*numElems, hipMemcpyDeviceToHost);
for(i = 0; i < numElems; i++)
cout << "Value copied over: " << h_data[i] << endl;
hipFree(dArray);
hipFree(monitor_data_dev);
return 0;
}
| 0b981d5dc22c5d82161d759fbee8126c157174c8.cu | #include<cuda.h>
#include<iostream>
#include <unistd.h>
using namespace std;
const int numElems =2;
__global__ void dataKernel( double* data, int nsteps){
//this adds a value to a variable stored in global memory
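//Pacing note: each iteration bumps data[thid] by 0.1 and then busy-waits
//until roughly 5000 device clock cycles have passed (the ternary guards
//against 32-bit counter wrap-around), giving the monitor kernel on the
//other stream a window to sample intermediate values from global memory.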
int thid = threadIdx.x;
//data[thid] = 0;
int i = 0;
bool wait = 1;
clock_t start = clock64();
clock_t now;
while(i < nsteps){
data[thid] = data[thid]+.1;
clock_t start = clock64();
i = i+1;
while(wait == 1){
now = clock();
clock_t cycles = now > start ? now - start : now + (0xffffffff - start);
if(cycles > 5000)
wait = 0;
}
wait = 1;
__syncthreads();
}
}
__global__ void monitorKernel(double * write_2_ptr, double * read_in_ptr){
*write_2_ptr = *read_in_ptr;
}
int main()
{
cout <<"Running CUDA init" << endl;
double *dArray;
int i = 0;
//pointer of helper function return
double *h_data;
//double monitor_data[numElems];
double *monitor_data_dev;
cudaMalloc((void**)&dArray, sizeof(double)*numElems);
cudaMemset(dArray, 0, numElems*sizeof(double));
cudaMallocHost((void**)&h_data, sizeof(double)*numElems);
cudaStream_t stream1;
cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
cudaMalloc((void**)&monitor_data_dev, sizeof(double)*numElems);
cout <<"Launching Helper Kernel" << endl;
//*help_rdy = help_fcn(*help_input, out);
dataKernel<<<1,numElems>>>(dArray, 1000);
usleep(400000); // sleep(.4) truncates its argument to 0; usleep restores the intended ~0.4 s pause
cout <<"Launching Monitor Kernel" << endl;
//cudaStreamSynchronize(stream1);
monitorKernel<<<1, 1,0, stream1>>>(monitor_data_dev, dArray);
cout <<"Launching Async Mem Cpy" << endl;
cudaMemcpyAsync(h_data, monitor_data_dev, numElems*sizeof(double), cudaMemcpyDeviceToHost, stream1);
cudaStreamSynchronize(stream1);
for(i = 0; i < numElems; i++)
cout << "Value copied over: " << h_data[i] << endl;
usleep(300000); // sleep(.3) truncates its argument to 0; usleep restores the intended ~0.3 s pause
cout <<"Launching Monitor Kernel" << endl;
//cudaStreamSynchronize(stream1);
monitorKernel<<<1, 1,0, stream1>>>(monitor_data_dev, dArray);
cout <<"Launching Async Mem Cpy" << endl;
cudaMemcpyAsync(h_data, monitor_data_dev, numElems*sizeof(double), cudaMemcpyDeviceToHost, stream1);
cudaStreamSynchronize(stream1);
for(i = 0; i < numElems; i++)
cout << "Value copied over: " << h_data[i] << endl;
cudaMemcpy(h_data, dArray, sizeof(double)*numElems, cudaMemcpyDeviceToHost);
for(i = 0; i < numElems; i++)
cout << "Value copied over: " << h_data[i] << endl;
cudaFree(dArray);
cudaFree(monitor_data_dev);
return 0;
}
|
63f880b5d19763ae2610eab640384c392483d333.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <wb.h>
#include <stdint.h>
#include <ctype.h>
#include <stdio.h>
#include <unistd.h>
#include "hamc_cpu_code.c"
#include "hamc_common.h"
#include "decrypt.cu"
#include "encrypt.cu"
#include "keygen.cu"
#include "hamc_e2e.cu"
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
void printHelp();
void printWelcome()
{
printf("HAMC Version %s0.1%s\n", YELLOW, NC);
printf("Developed by Mitchell Dzurick, Mitchell Russel, James Kuban");
}
int main(int argc, char *argv[]) {
printWelcome();
/* variables for timing operations */
/* input parameters */
int n = 2, p = 500, w = 30, t = 10, seed = 10;
char *outputFileName = NULL, *inputFileName = NULL, *action = NULL;
char *keyFile = NULL;
/* determines whether to run CPU based implementation, default no */
bool cpu = false;
bool verbose = false;
int c;
opterr = 0;
while ((c = getopt (argc, argv, "a:n:p:w:t:i:o:hs:cs:vk:")) != -1)
switch(c)
{
case 'k':
keyFile = strdup(optarg);
break;
case 'v':
verbose = true;
break;
case 'c':
cpu = true;
break;
case 'n':
n = atoi(optarg);
break;
case 's':
seed = atoi(optarg);
break;
case 'p':
p = atoi(optarg);
break;
case 'w':
w = atoi(optarg);
break;
case 't':
t = atoi(optarg);
break;
case 'i':
inputFileName = strdup(optarg);
break;
case 'o':
outputFileName = strdup(optarg);
break;
case 'a':
action = strdup(optarg);
break;
case 'h':
printHelp();
return(1);
default:
abort();
}
int k = (n - 1) * p;
bool test = false;
if (action == NULL) { printHelp(); return 1; } // -a <action> is required
if (!strcmp(action, (const char*)"test")) test = true;
/* print input parameters */
printf("\n");
printf("Input Parameters:\n");
if (!test) printf("\tInput file: %s%s%s\n", YELLOW, inputFileName, NC);
if (!test) printf("\tOutput file: %s%s%s\n", YELLOW, outputFileName, NC);
printf("\tGPU based execution: ");
if (!cpu) printf("%son%s\n", GREEN, NC);
else printf("%soff%s\n", RED, NC);
printf("\tn: %s%d%s\n", YELLOW, n, NC);
printf("\tp: %s%d%s\n", YELLOW, p, NC);
printf("\tw: %s%d%s\n", YELLOW, w, NC);
printf("\tt: %s%d%s\n", YELLOW, t, NC);
printf("\tk: %s%d%s\n", YELLOW, k, NC);
printf("\tseed: %s%d%s\n", YELLOW, seed, NC);
printf("\taction: %s%s%s\n", YELLOW, action, NC);
//TODO: make sure action is null-terminated before passing into strcmp
if (!strcmp(action, (const char*)"keygen")) {
run_keygen(n, p, t, w, seed, cpu, verbose);
}
else if (!strcmp(action, (const char*)"encrypt")) {
run_encryption_from_key(inputFileName, keyFile, outputFileName, n, t, cpu, verbose);
}
else if (!strcmp(action, (const char*)"decrypt")) {
//run_decrypt(inputFileName, outputFileName, n, p, t, w, seed, cpu, verbose);
}
else if (!strcmp(action, (const char*)"generate-message")) {
generate_message(outputFileName, k);
}
else if (test) {
test_hamc_e2e(n, p, t, w, seed, cpu, true);
}
else {
printf("action %s not recognized\n", action);
}
}
void printHelp(){
printf("\n\nHAMC - Hardware Accelerated McEliece Cryptosystem\n\n");
printf("Run the program as such:\n");
printf(" ./hamc [arguments]\n\n");
printf("Available Arguments:\n");
printf("[X] denotes that an argument is required\n");
printf("\t-a [X] : actions: keygen encrypt decrypt test\n\n");
printf("\t-c : Run CPU based execution\n\n");
printf("\t-h : Print this help menu\n\n");
printf("\t-i [X] : input filename\n\n");
printf("\t-k [X] : key filename\n\n");
printf("\t-n [X] : Weight of generator matrix rows \n\n");
printf("\t-o [X] : output filename\n\n");
printf("\t-p [X] : Size of matrix during key generation\n\n");
printf("\t-s : Seed for random number generation\n\n");
printf("\t-t [X] : Weight of Error Matrix rows\n\n");
printf("\t-v : Verbose\n\n");
printf("\t-w [X] : Weight of QC_MDPC code\n\n");
printf("Example program execution:\n");
printf(" ./hamc -a test -n 2 -p 1024 -t 10 -w 30 -s 10\n");
printf(" ./hamc -a test -n 2 -p 500 -t 10 -w 30 -s 10\n");
printf(" ./hamc -a test -n 2 -p 500 -t 10 -w 30 -s 10 -c\n");
}
| 63f880b5d19763ae2610eab640384c392483d333.cu | #include <cuda_runtime.h>
#include <stdlib.h>
#include <wb.h>
#include <stdint.h>
#include <ctype.h>
#include <stdio.h>
#include <unistd.h>
#include "hamc_cpu_code.c"
#include "hamc_common.h"
#include "decrypt.cu"
#include "encrypt.cu"
#include "keygen.cu"
#include "hamc_e2e.cu"
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
void printHelp();
void printWelcome()
{
printf("HAMC Version %s0.1%s\n", YELLOW, NC);
printf("Developed by Mitchell Dzurick, Mitchell Russel, James Kuban");
}
int main(int argc, char *argv[]) {
printWelcome();
/* variables for timing operations */
/* input parameters */
int n = 2, p = 500, w = 30, t = 10, seed = 10;
char *outputFileName = NULL, *inputFileName = NULL, *action = NULL;
char *keyFile = NULL;
/* determines whether to run CPU based implementation, default no */
bool cpu = false;
bool verbose = false;
int c;
opterr = 0;
while ((c = getopt (argc, argv, "a:n:p:w:t:i:o:hs:cs:vk:")) != -1)
switch(c)
{
case 'k':
keyFile = strdup(optarg);
break;
case 'v':
verbose = true;
break;
case 'c':
cpu = true;
break;
case 'n':
n = atoi(optarg);
break;
case 's':
seed = atoi(optarg);
break;
case 'p':
p = atoi(optarg);
break;
case 'w':
w = atoi(optarg);
break;
case 't':
t = atoi(optarg);
break;
case 'i':
inputFileName = strdup(optarg);
break;
case 'o':
outputFileName = strdup(optarg);
break;
case 'a':
action = strdup(optarg);
break;
case 'h':
printHelp();
return(1);
default:
abort();
}
int k = (n - 1) * p;
bool test = false;
if (action == NULL) { printHelp(); return 1; } // -a <action> is required
if (!strcmp(action, (const char*)"test")) test = true;
/* print input parameters */
printf("\n");
printf("Input Parameters:\n");
if (!test) printf("\tInput file: %s%s%s\n", YELLOW, inputFileName, NC);
if (!test) printf("\tOutput file: %s%s%s\n", YELLOW, outputFileName, NC);
printf("\tGPU based execution: ");
if (!cpu) printf("%son%s\n", GREEN, NC);
else printf("%soff%s\n", RED, NC);
printf("\tn: %s%d%s\n", YELLOW, n, NC);
printf("\tp: %s%d%s\n", YELLOW, p, NC);
printf("\tw: %s%d%s\n", YELLOW, w, NC);
printf("\tt: %s%d%s\n", YELLOW, t, NC);
printf("\tk: %s%d%s\n", YELLOW, k, NC);
printf("\tseed: %s%d%s\n", YELLOW, seed, NC);
printf("\taction: %s%s%s\n", YELLOW, action, NC);
//TODO: make sure action is null-terminated before passing into strcmp
if (!strcmp(action, (const char*)"keygen")) {
run_keygen(n, p, t, w, seed, cpu, verbose);
}
else if (!strcmp(action, (const char*)"encrypt")) {
run_encryption_from_key(inputFileName, keyFile, outputFileName, n, t, cpu, verbose);
}
else if (!strcmp(action, (const char*)"decrypt")) {
//run_decrypt(inputFileName, outputFileName, n, p, t, w, seed, cpu, verbose);
}
else if (!strcmp(action, (const char*)"generate-message")) {
generate_message(outputFileName, k);
}
else if (test) {
test_hamc_e2e(n, p, t, w, seed, cpu, true);
}
else {
printf("action %s not recognized\n", action);
}
}
void printHelp(){
printf("\n\nHAMC - Hardware Accelerated McEliece Cryptosystem\n\n");
printf("Run the program as such:\n");
printf(" ./hamc [arguments]\n\n");
printf("Available Arguments:\n");
printf("[X] denotes that an argument is required\n");
printf("\t-a [X] : actions: keygen encrypt decrypt test\n\n");
printf("\t-c : Run CPU based execution\n\n");
printf("\t-h : Print this help menu\n\n");
printf("\t-i [X] : input filename\n\n");
printf("\t-k [X] : key filename\n\n");
printf("\t-n [X] : Weight of generator matrix rows \n\n");
printf("\t-o [X] : output filename\n\n");
printf("\t-p [X] : Size of matrix during key generation\n\n");
printf("\t-s : Seed for random number generation\n\n");
printf("\t-t [X] : Weight of Error Matrix rows\n\n");
printf("\t-v : Verbose\n\n");
printf("\t-w [X] : Weight of QC_MDPC code\n\n");
printf("Example program execution:\n");
printf(" ./hamc -a test -n 2 -p 1024 -t 10 -w 30 -s 10\n");
printf(" ./hamc -a test -n 2 -p 500 -t 10 -w 30 -s 10\n");
printf(" ./hamc -a test -n 2 -p 500 -t 10 -w 30 -s 10 -c\n");
}
|
56d4eff8e153b76503d27e96c1d2f925c9f23189.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Host Side Code for Cross-correlation in GPU
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
//#include "Cross_Data_type.h"
#include "corr2Mex_V3.h"
#include "normXcorr_GPUKernel_V3.cu"
using namespace std;
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width,int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void CorrelationOnDevice(const Matrix M, const Matrix N, float *CorrH, params parameters,float *qualityH);
int main(int argc,char** argv) {
// Input Parameters
if(argc!=9)
{
printf("Usage %s Parameters missing\n",argv[0]);
return 1;
}
int imageWidth = atoi(argv[1]);
int imageHeight = atoi(argv[2]);
int SEARCH_X = atoi(argv[3]);
int SEARCH_Y = atoi(argv[4]);
int KERNEL_X = atoi(argv[5]);
int KERNEL_Y = atoi(argv[6]);
int numX = atoi(argv[7]);
int numY = atoi(argv[8]);
int DisplacementSize = numX*numY;
int Corr_size = SEARCH_X*SEARCH_Y;
Matrix Pre;
Matrix Post;
float OVERLAP = 50.0;
params parameters = {SEARCH_Y,SEARCH_X,KERNEL_Y,KERNEL_X,OVERLAP,numX,numY};
Pre = AllocateMatrix(imageHeight,imageWidth, 1);
Post = AllocateMatrix(imageHeight,imageWidth, 1);
float gpuTime=0.f;
// Allocating Host-side Memory for Cross-correlation
/*SoA_Corr *CorrH;
CorrH = (SoA_Corr *)malloc(sizeof(SoA_Corr)*DisplacementSize);
for(int k=0;k<DisplacementSize;k++){
CorrH[k].Corr_Points = (float*) malloc(Corr_size*sizeof(float));
}*/
float *CorrH;
CorrH = (float*)malloc(Corr_size*DisplacementSize*sizeof(float));
float *qualityH;
qualityH = (float*) malloc(sizeof(float)*parameters.numX*parameters.numY);
float elapsedTime_inc;
hipEvent_t startEvent_inc, stopEvent_inc;
hipEventCreate(&startEvent_inc);
hipEventCreate(&stopEvent_inc);
hipEventRecord(startEvent_inc,0); // starting timing for inclusive
CorrelationOnDevice(Pre, Post, CorrH, parameters, qualityH); // Execution Model for GPU is set up in this function
hipEventRecord(stopEvent_inc,0); //ending timing for inclusive
hipEventSynchronize(stopEvent_inc);
hipEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
gpuTime = elapsedTime_inc;
// Printing Cross-correlation Matrix for Block:0
/*for(int h=0;h<DisplacementSize;h++){
for(int z=0;z<SEARCH_X;z++){
for(int g=0;g<SEARCH_Y;g++){
printf("%f ",CorrH[g+SEARCH_X*(z+SEARCH_Y*h)]);
}
printf("\n");
}
printf("\n");
}*/
/*for(int h=0;h<DisplacementSize;h++){
for(int g=0;g<SEARCH_Y;g++){
for(int z=0;z<SEARCH_X;z++){
printf("%f ",CorrH[(h*SEARCH_Y+g)*SEARCH_X+z]);
}
printf("\n");
}
printf("\n");
}*/
printf("\n");
// Printing for Quality Verification
printf("%f\n",qualityH[0]);
printf("Elapsed Time = %f\n",gpuTime);
// Free matrices
FreeMatrix(&Pre);
FreeMatrix(&Post);
return 0;
}
//// Cuda Kernel Call //////
void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters,float *qualityH)
{
// Load Pre and Post to the device
Matrix Pred = AllocateDeviceMatrix(Pre);
CopyToDeviceMatrix(Pred, Pre);
Matrix Postd = AllocateDeviceMatrix(Post);
CopyToDeviceMatrix(Postd, Post);
// Allocate Space for Pre-Mean
float *preMean;
float *preVar;
hipMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY);
hipMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY);
//Allocate Space for Post-mean
float *postMean;
float *postVar;
hipMalloc((void **)&postMean,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
hipMalloc((void **)&postVar,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
// Temporary host corr to find max
float *tempCorrHost;
int modx = parameters.searchX%2;
int mody = parameters.searchY%2;
hipMalloc((void **)&tempCorrHost,sizeof(float)*(parameters.searchX+modx)*(parameters.searchY+mody)*parameters.numX*parameters.numY);
// CC Value Matrix
float *qualityD;
hipMalloc((void **)&qualityD,sizeof(float)*parameters.numX*parameters.numY);
// Device Memory Allocation for Cross-correlation Result
float *CorrD;
hipMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY);
//hipMalloc((SoA_Corr **)&CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY);
// Setup the execution configuration
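// One thread block per displacement estimate (numX x numY grid) and one
// thread per search-window position (searchX x searchY block); the kernel in
// normXcorr_GPUKernel_V3.cu presumably assigns each thread one correlation
// lag within its block's window.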
dim3 dimBlock(parameters.searchX, parameters.searchY);
//dim3 dimBlock(2*parameters.searchX, 2*parameters.searchY);
dim3 dimGrid(parameters.numX, parameters.numY);
//int sharedmemsize = 2*parameters.searchX*parameters.searchY*sizeof(float);
// Launch the device computation threads!
//normXcorr_GPU<<<dimGrid, dimBlock,sharedmemsize>>>(Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar);
hipLaunchKernelGGL(( normXcorr_GPU), dim3(dimGrid), dim3(dimBlock), 0, 0, Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar,tempCorrHost,qualityD);
//Copying SoA from Device to Host
//CopyFromDeviceMatrix(Corr, Corrd);
//hipMemcpy(CorrH,CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost);
hipMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,hipMemcpyDeviceToHost);
hipMemcpy(qualityH,qualityD,sizeof(float)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost);
// Free device matrices
FreeDeviceMatrix(&Pred);
FreeDeviceMatrix(&Postd);
hipFree(CorrD);
//FreeDeviceMatrix(&Corrd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
FILE *fp;
//fp = fopen("trialNumbers.inp","r");
fp = fopen("Real_Data_US.inp","r");
// don't allocate memory on option 2
M.elements = (float*) malloc(size*sizeof(float));
//int n_bytes = size*sizeof(float);
//hipHostMalloc((void**)&M.elements,n_bytes);
if(init)
{
for(unsigned int i = 0; i < M.width * M.height; i++)
{
fscanf(fp,"%f",&M.elements[i]);
}
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
| 56d4eff8e153b76503d27e96c1d2f925c9f23189.cu | // Host Side Code for Cross-correlation in GPU
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
//#include "Cross_Data_type.h"
#include "corr2Mex_V3.h"
#include "normXcorr_GPUKernel_V3.cu"
using namespace std;
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width,int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void CorrelationOnDevice(const Matrix M, const Matrix N, float *CorrH, params parameters,float *qualityH);
int main(int argc,char** argv) {
// Input Parameters
if(argc!=9)
{
printf("Usage %s Parameters missing\n",argv[0]);
return 1;
}
int imageWidth = atoi(argv[1]);
int imageHeight = atoi(argv[2]);
int SEARCH_X = atoi(argv[3]);
int SEARCH_Y = atoi(argv[4]);
int KERNEL_X = atoi(argv[5]);
int KERNEL_Y = atoi(argv[6]);
int numX = atoi(argv[7]);
int numY = atoi(argv[8]);
int DisplacementSize = numX*numY;
int Corr_size = SEARCH_X*SEARCH_Y;
Matrix Pre;
Matrix Post;
float OVERLAP = 50.0;
params parameters = {SEARCH_Y,SEARCH_X,KERNEL_Y,KERNEL_X,OVERLAP,numX,numY};
Pre = AllocateMatrix(imageHeight,imageWidth, 1);
Post = AllocateMatrix(imageHeight,imageWidth, 1);
float gpuTime=0.f;
// Allocating Host-side Memory for Cross-correlation
/*SoA_Corr *CorrH;
CorrH = (SoA_Corr *)malloc(sizeof(SoA_Corr)*DisplacementSize);
for(int k=0;k<DisplacementSize;k++){
CorrH[k].Corr_Points = (float*) malloc(Corr_size*sizeof(float));
}*/
float *CorrH;
CorrH = (float*)malloc(Corr_size*DisplacementSize*sizeof(float));
float *qualityH;
qualityH = (float*) malloc(sizeof(float)*parameters.numX*parameters.numY);
float elapsedTime_inc;
cudaEvent_t startEvent_inc, stopEvent_inc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
cudaEventRecord(startEvent_inc,0); // starting timing for inclusive
CorrelationOnDevice(Pre, Post, CorrH, parameters, qualityH); // Execution Model for GPU is set up in this function
cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
gpuTime = elapsedTime_inc;
// Printing Cross-correlation Matrix for Block:0
/*for(int h=0;h<DisplacementSize;h++){
for(int z=0;z<SEARCH_X;z++){
for(int g=0;g<SEARCH_Y;g++){
printf("%f ",CorrH[g+SEARCH_X*(z+SEARCH_Y*h)]);
}
printf("\n");
}
printf("\n");
}*/
/*for(int h=0;h<DisplacementSize;h++){
for(int g=0;g<SEARCH_Y;g++){
for(int z=0;z<SEARCH_X;z++){
printf("%f ",CorrH[(h*SEARCH_Y+g)*SEARCH_X+z]);
}
printf("\n");
}
printf("\n");
}*/
printf("\n");
// Printing for Quality Verification
printf("%f\n",qualityH[0]);
printf("Elapsed Time = %f\n",gpuTime);
// Free matrices
FreeMatrix(&Pre);
FreeMatrix(&Post);
return 0;
}
//// Cuda Kernel Call //////
void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters,float *qualityH)
{
// Load Pre and Post to the device
Matrix Pred = AllocateDeviceMatrix(Pre);
CopyToDeviceMatrix(Pred, Pre);
Matrix Postd = AllocateDeviceMatrix(Post);
CopyToDeviceMatrix(Postd, Post);
// Allocate Space for Pre-Mean
float *preMean;
float *preVar;
cudaMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY);
cudaMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY);
//Allocate Space for Post-mean
float *postMean;
float *postVar;
cudaMalloc((void **)&postMean,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
cudaMalloc((void **)&postVar,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
// Temporary host corr to find max
float *tempCorrHost;
int modx = parameters.searchX%2;
int mody = parameters.searchY%2;
cudaMalloc((void **)&tempCorrHost,sizeof(float)*(parameters.searchX+modx)*(parameters.searchY+mody)*parameters.numX*parameters.numY);
// CC Value Matrix
float *qualityD;
cudaMalloc((void **)&qualityD,sizeof(float)*parameters.numX*parameters.numY);
// Device Memory Allocation for Cross-correlation Result
float *CorrD;
cudaMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY);
//cudaMalloc((SoA_Corr **)&CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY);
// Setup the execution configuration
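// One thread block per displacement estimate (numX x numY grid) and one
// thread per search-window position (searchX x searchY block); the kernel in
// normXcorr_GPUKernel_V3.cu presumably assigns each thread one correlation
// lag within its block's window.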
dim3 dimBlock(parameters.searchX, parameters.searchY);
//dim3 dimBlock(2*parameters.searchX, 2*parameters.searchY);
dim3 dimGrid(parameters.numX, parameters.numY);
//int sharedmemsize = 2*parameters.searchX*parameters.searchY*sizeof(float);
// Launch the device computation threads!
//normXcorr_GPU<<<dimGrid, dimBlock,sharedmemsize>>>(Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar);
normXcorr_GPU<<<dimGrid, dimBlock>>>(Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar,tempCorrHost,qualityD);
//Copting SoA from Device to Host
//CopyFromDeviceMatrix(Corr, Corrd);
//cudaMemcpy(CorrH,CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost);
cudaMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,cudaMemcpyDeviceToHost);
cudaMemcpy(qualityH,qualityD,sizeof(float)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost);
// Free device matrices
FreeDeviceMatrix(&Pred);
FreeDeviceMatrix(&Postd);
cudaFree(CorrD);
//FreeDeviceMatrix(&Corrd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
FILE *fp;
//fp = fopen("trialNumbers.inp","r");
fp = fopen("Real_Data_US.inp","r");
// don't allocate memory on option 2
M.elements = (float*) malloc(size*sizeof(float));
//int n_bytes = size*sizeof(float);
//cudaMallocHost((void**)&M.elements,n_bytes);
if(init)
{
for(unsigned int i = 0; i < M.width * M.height; i++)
{
fscanf(fp,"%f",&M.elements[i]);
}
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
|
4123c88938c5ceeab90be2f44b19f1e86df52dd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int index = threadIdx.x + (blockDim.x * blockIdx.x);
if (index >= n) {
return;
}
bools[index] = idata[index] == 0 ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
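// Worked example (assuming indices holds the exclusive scan of bools):
//   idata   = [3, 0, 7, 0]
//   bools   = [1, 0, 1, 0]
//   indices = [0, 1, 1, 2]   ->   odata = [3, 7]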
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int index = threadIdx.x + (blockDim.x * blockIdx.x);
if (index >= n) {
return;
}
if (bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}
}
}
| 4123c88938c5ceeab90be2f44b19f1e86df52dd4.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int index = threadIdx.x + (blockDim.x * blockIdx.x);
if (index >= n) {
return;
}
bools[index] = idata[index] == 0 ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
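// Worked example (assuming indices holds the exclusive scan of bools):
//   idata   = [3, 0, 7, 0]
//   bools   = [1, 0, 1, 0]
//   indices = [0, 1, 1, 2]   ->   odata = [3, 7]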
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int index = threadIdx.x + (blockDim.x * blockIdx.x);
if (index >= n) {
return;
}
if (bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}
}
}
|
ce56ca5e2627559c5030083bb1eb7950cff93bf2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "functions.c"
__device__ unsigned int modprodC(unsigned int a, unsigned int b, unsigned int p) {
unsigned int za = a;
unsigned int ab = 0;
while (b > 0) {
if (b%2 == 1) ab = (ab + za) % p;
za = (2 * za) % p;
b /= 2;
}
return ab;
}
//compute a^b mod p safely
__device__ unsigned int modExpC(unsigned int a, unsigned int b, unsigned int p) {
unsigned int z = a;
unsigned int aExpb = 1;
while (b > 0) {
if (b%2 == 1) aExpb = modprodC(aExpb, z, p);
z = modprodC(z, z, p);
b /= 2;
}
return aExpb;
}
__global__ void findX(unsigned int p, unsigned int g, unsigned int h, unsigned int *x)
{
//unsigned int block = blockIdx.x;
//unsigned int blocksize = blockDim.x;
//unsigned int thread = threadIdx.x;
//unsigned int id=thread + block*blocksize;
if (*x==0 || modExpC(g,*x,p)!=h) {
printf("Finding the secret key...\n");
double startTime = clock();
for (unsigned int i=0;i<p-1;i++) {
if (modExpC(g,i+1,p)==h) {
printf("Secret key found! x = %u \n", i+1);
*x=i+1;
}
}
double endTime = clock();
double totalTime = (endTime-startTime)/CLOCKS_PER_SEC;
double work = (double) p;
double throughput = work/totalTime;
printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);
}
}
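// Note: as written, every thread executes the full 1..p-1 search loop (the
// per-thread id computation above is commented out), so launching many
// threads only replicates the same work. Partitioning the key range by
// global thread id would be the natural next step for a real speedup.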
int main (int argc, char **argv) {
/* Part 2. Start this program by first copying the contents of the main function from
your completed decrypt.c main function. */
/* Q4 Make the search for the secret key parallel on the GPU using CUDA. */
//declare storage for an ElGamal cryptosystem
unsigned int n, p, g, h, x;
unsigned int Nints;
//get the secret key from the user
printf("Enter the secret key (0 if unknown): "); fflush(stdout);
char stat = scanf("%u",&x);
printf("Reading file.\n");
/* Q3 Complete this function. Read in the public key data from public_key.txt
and the cyphertexts from messages.txt. */
FILE *pub_key = fopen("public_key.txt","r");
FILE *cyperT = fopen("message.txt","r");
fscanf(pub_key,"%u\n%u\n%u\n%u",&n,&p,&g,&h);
fclose(pub_key);
fscanf(cyperT,"%u\n",&Nints);
unsigned int *a=(unsigned int *) malloc(Nints*sizeof(unsigned int));
unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int));
for(unsigned int i=0;i<Nints;i++)
{
fscanf(cyperT,"%u %u\n",&Zmessage[i],&a[i]);
}
fclose(cyperT);
// find the secret key
  unsigned int Nthreads = Nints;
  unsigned int Nblocks = (n+Nthreads-1)/Nthreads;
  // x lives on the host; the kernel needs a device copy it can read and update
  unsigned int *d_x;
  hipMalloc((void**)&d_x, sizeof(unsigned int));
  hipMemcpy(d_x, &x, sizeof(unsigned int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(findX, dim3(Nthreads), dim3(Nblocks), 0, 0, p, g, h, d_x);
  hipDeviceSynchronize();
  unsigned int foundx;
  /* Q3 After finding the secret key, decrypt the message */
  hipMemcpy(&foundx, d_x, sizeof(unsigned int), hipMemcpyDeviceToHost);
  printf("secret key:%u\n",foundx);
ElGamalDecrypt(Zmessage,a,Nints,p,foundx);
unsigned char *message = (unsigned char *) malloc(Nints*sizeof(unsigned char));
unsigned int charsPerInt = (n-1)/8;
unsigned int Nchars = Nints*charsPerInt;
convertZToString(Zmessage,Nints,message,Nchars);
printf("Decrypted Message = \"%s\"\n",message);
  hipFree(d_x);
return 0;
}
| ce56ca5e2627559c5030083bb1eb7950cff93bf2.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "cuda.h"
#include "functions.c"
__device__ unsigned int modprodC(unsigned int a, unsigned int b, unsigned int p) {
unsigned int za = a;
unsigned int ab = 0;
while (b > 0) {
if (b%2 == 1) ab = (ab + za) % p;
za = (2 * za) % p;
b /= 2;
}
return ab;
}
//compute a^b mod p safely
__device__ unsigned int modExpC(unsigned int a, unsigned int b, unsigned int p) {
unsigned int z = a;
unsigned int aExpb = 1;
while (b > 0) {
if (b%2 == 1) aExpb = modprodC(aExpb, z, p);
z = modprodC(z, z, p);
b /= 2;
}
return aExpb;
}
__global__ void findX(unsigned int p, unsigned int g, unsigned int h, unsigned int *x)
{
//unsigned int block = blockIdx.x;
//unsigned int blocksize = blockDim.x;
//unsigned int thread = threadIdx.x;
//unsigned int id=thread + block*blocksize;
if (*x==0 || modExpC(g,*x,p)!=h) {
printf("Finding the secret key...\n");
double startTime = clock();
for (unsigned int i=0;i<p-1;i++) {
if (modExpC(g,i+1,p)==h) {
printf("Secret key found! x = %u \n", i+1);
*x=i+1;
}
}
double endTime = clock();
double totalTime = (endTime-startTime)/CLOCKS_PER_SEC;
double work = (double) p;
double throughput = work/totalTime;
printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);
}
}
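// Note: as written, every thread executes the full 1..p-1 search loop (the
// per-thread id computation above is commented out), so launching many
// threads only replicates the same work. Partitioning the key range by
// global thread id would be the natural next step for a real speedup.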
int main (int argc, char **argv) {
/* Part 2. Start this program by first copying the contents of the main function from
your completed decrypt.c main function. */
/* Q4 Make the search for the secret key parallel on the GPU using CUDA. */
//declare storage for an ElGamal cryptosystem
unsigned int n, p, g, h, x;
unsigned int Nints;
//get the secret key from the user
printf("Enter the secret key (0 if unknown): "); fflush(stdout);
char stat = scanf("%u",&x);
printf("Reading file.\n");
/* Q3 Complete this function. Read in the public key data from public_key.txt
and the cyphertexts from messages.txt. */
FILE *pub_key = fopen("public_key.txt","r");
FILE *cyperT = fopen("message.txt","r");
fscanf(pub_key,"%u\n%u\n%u\n%u",&n,&p,&g,&h);
fclose(pub_key);
fscanf(cyperT,"%u\n",&Nints);
unsigned int *a=(unsigned int *) malloc(Nints*sizeof(unsigned int));
unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int));
for(unsigned int i=0;i<Nints;i++)
{
fscanf(cyperT,"%u %u\n",&Zmessage[i],&a[i]);
}
fclose(cyperT);
// find the secret key
unsigned int Nthreads = Nints;
unsigned int Nblocks = (n+Nthreads-1)/Nthreads;
cudaMalloc((void**)&x,1*sizeof(unsigned int));
printf("%u\n",x);
findX<<< Nthreads,Nblocks >>>(p,g,h,&x);
//cudaDeviceSynchronize();
printf("x:%u\n",x);
unsigned int foundx;
/* Q3 After finding the secret key, decrypt the message */
cudaMemcpy(&x,&foundx,1*sizeof(unsigned int),cudaMemcpyHostToDevice);
printf("secret key:%u\n",foundx);
ElGamalDecrypt(Zmessage,a,Nints,p,foundx);
unsigned char *message = (unsigned char *) malloc(Nints*sizeof(unsigned char));
unsigned int charsPerInt = (n-1)/8;
unsigned int Nchars = Nints*charsPerInt;
convertZToString(Zmessage,Nints,message,Nchars);
printf("Decrypted Message = \"%s\"\n",message);
  cudaFree(d_x);
return 0;
}
|
5a5b23dc5aca8896fe5c4d4095f6c01f8e8f83b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* a simple test
*/
__shared__ float data1[32][32];
__shared__ float data2[32][32];
__shared__ float data3[32][32];
__device__ void mult(__shared__ float d1[32][32],
__shared__ float d2[32][32],
__shared__ float d3[32][32],
int idx)
{
int i;
for (i = 0; i < 31; i+=2) {
d1[idx][i] = d2[idx+1][i-1] + d2[idx][i-1] + d2[idx-1][i-1] +
d2[idx+1][i] + d2[idx][i] + d2[idx-1][i] +
d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1];
d1[idx][i+1] = d2[idx+1][i] + d2[idx][i] + d2[idx-1][i] +
d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1] +
d2[idx+1][i+2] + d2[idx][i+2] + d2[idx-1][i+2];
i+= 1;
d1[idx][i] = d2[idx+1][i-1] + d2[idx][i-1] + d2[idx-1][i-1] +
d2[idx+1][i] + d2[idx][i] + d2[idx-1][i] +
d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1];
d1[idx][i+1] = d2[idx+1][i] + d2[idx][i] + d2[idx-1][i] +
d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1] +
d2[idx+1][i+2] + d2[idx][i+2] + d2[idx-1][i+2];
}
}
__global__ void doit(int start, int end) {
int i;
for (i = start; i < end; i++) {
mult(data1, data2, data3, i);
}
}
| 5a5b23dc5aca8896fe5c4d4095f6c01f8e8f83b5.cu | /*
* a simple test
*/
__shared__ float data1[32][32];
__shared__ float data2[32][32];
__shared__ float data3[32][32];
__device__ void mult(__shared__ float d1[32][32],
__shared__ float d2[32][32],
__shared__ float d3[32][32],
int idx)
{
int i;
for (i = 0; i < 31; i+=2) {
d1[idx][i] = d2[idx+1][i-1] + d2[idx][i-1] + d2[idx-1][i-1] +
d2[idx+1][i] + d2[idx][i] + d2[idx-1][i] +
d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1];
d1[idx][i+1] = d2[idx+1][i] + d2[idx][i] + d2[idx-1][i] +
d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1] +
d2[idx+1][i+2] + d2[idx][i+2] + d2[idx-1][i+2];
i+= 1;
d1[idx][i] = d2[idx+1][i-1] + d2[idx][i-1] + d2[idx-1][i-1] +
d2[idx+1][i] + d2[idx][i] + d2[idx-1][i] +
d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1];
d1[idx][i+1] = d2[idx+1][i] + d2[idx][i] + d2[idx-1][i] +
d2[idx+1][i+1] + d2[idx][i+1] + d2[idx-1][i+1] +
d2[idx+1][i+2] + d2[idx][i+2] + d2[idx-1][i+2];
}
}
__global__ void doit(int start, int end) {
int i;
for (i = start; i < end; i++) {
mult(data1, data2, data3, i);
}
}
|
48c8d9ef076e178d741929019f5bd63679a274d6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define N 2 //64
__device__ float multiplyByTwo(float *v, unsigned int tid) {
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid) {
return v[tid] * 0.5f;
}
typedef float(*funcType)(float*, unsigned int);
__global__ void foor(float *v, unsigned int size, unsigned int i) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
funcType f;
__requires (i == 1 || i == 2)
/*** ESBMC_assert (i = 1 || i = 2); ***/
__assert(i == 1 || i == 2);
if (i == 1)
f = multiplyByTwo;
else if (i == 2)
f = divideByTwo;
else
f = NULL;
if (tid < size)
{
float x = (*f)(v, tid);
x += multiplyByTwo(v, tid);
v[tid] = x;
}
}
| 48c8d9ef076e178d741929019f5bd63679a274d6.cu | #include <stdio.h>
#include "cuda.h"
#define N 2 //64
__device__ float multiplyByTwo(float *v, unsigned int tid) {
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid) {
return v[tid] * 0.5f;
}
typedef float(*funcType)(float*, unsigned int);
__global__ void foor(float *v, unsigned int size, unsigned int i) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
funcType f;
__requires (i == 1 || i == 2)
/*** ESBMC_assert (i = 1 || i = 2); ***/
__assert(i == 1 || i == 2);
if (i == 1)
f = multiplyByTwo;
else if (i == 2)
f = divideByTwo;
else
f = NULL;
if (tid < size)
{
float x = (*f)(v, tid);
x += multiplyByTwo(v, tid);
v[tid] = x;
}
}
|
86d8ef622016bf39dfe586b6f8cb8a6f112c8ecf.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "MyFunctions.h"
#include "CharacterList.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
using namespace std;
static const unsigned W = 1920;
static const unsigned H = 1080;
static const int char_w = 12;
static const int char_h = 24;
static const int w_num = 160;
static const int h_num = 45;
class Rain
{
public:
int pos;
int leng;
Rain()
{
pos = rand() % w_num;
leng = (rand() % 10) + 5;
}
};
__global__ void render(uint8_t *yuv, bool d_pos_occu[], int d_bright_time[], bool d_not_empty[], Character d_character_i[])
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int col = idx % w_num;
int row = idx / w_num;
if (col < w_num && row < h_num)
{
int pixel_idx_start = (char_h * W * row) + (char_w * col);
int color_idx_start = (W * H) + (char_h/2 * W/2 * row) + (char_w/2 * col);
// render
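// Buffer layout / colour note: yuv is a planar 4:2:0 frame -- W*H luma bytes
// followed by two quarter-size chroma planes at offsets W*H and W*H + W*H/4.
// The factors 0.331 and 0.419 below are the green coefficients of the
// RGB->YUV transform, so a lit glyph with brightness b is drawn as green
// with luma 255*b/20.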
for (int i = 0; i < char_h; i++)
{
for (int j = 0; j < char_w; j++)
{
if(d_character_i[idx].bitmap[i][j] == true)
yuv[pixel_idx_start + (W * i) + j] = 255 * d_bright_time[idx] / 20;
else
yuv[pixel_idx_start + (W * i) + j] = 0;
}
}
for (int i = 0; i < char_h/2; i++)
{
for (int j = 0; j < char_w/2; j++)
{
if(d_character_i[idx].bitmap[i*2][j*2])
yuv[color_idx_start + (W/2 * i) + j] = 128 - (0.331 * 255 * d_bright_time[idx] / 20) ;
else
yuv[color_idx_start + (W/2 * i) + j] = 128;
}
}
for (int i = 0; i < char_h / 2; i++)
{
for (int j = 0; j < char_w / 2; j++)
{
if(d_character_i[idx].bitmap[i * 2][j * 2])
yuv[color_idx_start + (W * H / 4) + (W/2 * i) + j] = 128 - (0.419 * 255 * d_bright_time[idx] / 20);
else
yuv[color_idx_start + (W * H / 4) + (W/2 * i) + j] = 128;
}
}
// prepare next round
int temp = d_bright_time[idx];
if (row >= 0 && row < (h_num - 1))
{
d_bright_time[idx + w_num] = temp;
}
if (row == 0)
{
if (d_bright_time[idx] > 0)
{
d_bright_time[idx] = d_bright_time[idx] - 1;
}
}
// set occupancy of every position
if (d_bright_time[idx] > 0)
{
d_not_empty[col] = true;
}
if (d_not_empty[col] == true)
{
d_pos_occu[col] = true;
}
else
{
d_pos_occu[col] = false;
}
}
}
void RainFall(uint8_t *yuv)
{
static bool pos_occu[w_num] = { false };
static int bright_time[w_num * h_num] = { 0 };
static int char_id[w_num * h_num];
static Character character_i[w_num * h_num];
for (int i = 0; i < (w_num * h_num); i++)
{
char_id[i] = rand() % 10;
Character character_temp(char_id[i]);
character_i[i] = character_temp;
}
int blockNum = ((w_num * h_num + 1) / 512) + 1;
Rain *rain_i = new Rain[2];
for (int i = 0; i < 2; i++)
{
if (pos_occu[rain_i[i].pos] == false)
{
bright_time[rain_i[i].pos] = rain_i[i].leng;
}
}
bool *d_pos_occu;
int *d_bright_time;
bool *d_not_empty;
Character *d_character_i;
hipMalloc(&d_pos_occu, w_num * sizeof(bool));
hipMalloc(&d_bright_time, w_num * h_num * sizeof(int));
hipMalloc(&d_not_empty, w_num * sizeof(bool));
hipMalloc(&d_character_i, w_num * h_num * sizeof(Character));
hipMemset(d_pos_occu, false, w_num * sizeof(bool));
hipMemset(d_bright_time, 0, w_num * h_num * sizeof(int));
hipMemset(d_not_empty, false, w_num * sizeof(bool));
hipMemcpy(d_pos_occu, pos_occu, w_num * sizeof(bool), hipMemcpyHostToDevice);
hipMemcpy(d_bright_time, bright_time, w_num * h_num * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_character_i, character_i, w_num * h_num * sizeof(Character), hipMemcpyHostToDevice);
render<<<blockNum, 512>>>(yuv, d_pos_occu, d_bright_time, d_not_empty, d_character_i);
hipMemcpy(pos_occu, d_pos_occu, w_num * sizeof(bool), hipMemcpyDeviceToHost);
hipMemcpy(bright_time, d_bright_time, w_num * h_num * sizeof(int), hipMemcpyDeviceToHost);
delete[] rain_i; // rain_i is allocated with new[] on every call; release it
hipFree(d_pos_occu);
hipFree(d_bright_time);
hipFree(d_not_empty);
hipFree(d_character_i);
} | 86d8ef622016bf39dfe586b6f8cb8a6f112c8ecf.cu | #include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "MyFunctions.h"
#include "CharacterList.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
using namespace std;
static const unsigned W = 1920;
static const unsigned H = 1080;
static const int char_w = 12;
static const int char_h = 24;
static const int w_num = 160;
static const int h_num = 45;
class Rain
{
public:
int pos;
int leng;
Rain()
{
pos = rand() % w_num;
leng = (rand() % 10) + 5;
}
};
__global__ void render(uint8_t *yuv, bool d_pos_occu[], int d_bright_time[], bool d_not_empty[], Character d_character_i[])
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int col = idx % w_num;
int row = idx / w_num;
if (col < w_num && row < h_num)
{
int pixel_idx_start = (char_h * W * row) + (char_w * col);
int color_idx_start = (W * H) + (char_h/2 * W/2 * row) + (char_w/2 * col);
// render
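// Buffer layout / colour note: yuv is a planar 4:2:0 frame -- W*H luma bytes
// followed by two quarter-size chroma planes at offsets W*H and W*H + W*H/4.
// The factors 0.331 and 0.419 below are the green coefficients of the
// RGB->YUV transform, so a lit glyph with brightness b is drawn as green
// with luma 255*b/20.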
for (int i = 0; i < char_h; i++)
{
for (int j = 0; j < char_w; j++)
{
if(d_character_i[idx].bitmap[i][j] == true)
yuv[pixel_idx_start + (W * i) + j] = 255 * d_bright_time[idx] / 20;
else
yuv[pixel_idx_start + (W * i) + j] = 0;
}
}
for (int i = 0; i < char_h/2; i++)
{
for (int j = 0; j < char_w/2; j++)
{
if(d_character_i[idx].bitmap[i*2][j*2])
yuv[color_idx_start + (W/2 * i) + j] = 128 - (0.331 * 255 * d_bright_time[idx] / 20) ;
else
yuv[color_idx_start + (W/2 * i) + j] = 128;
}
}
for (int i = 0; i < char_h / 2; i++)
{
for (int j = 0; j < char_w / 2; j++)
{
if(d_character_i[idx].bitmap[i * 2][j * 2])
yuv[color_idx_start + (W * H / 4) + (W/2 * i) + j] = 128 - (0.419 * 255 * d_bright_time[idx] / 20);
else
yuv[color_idx_start + (W * H / 4) + (W/2 * i) + j] = 128;
}
}
// prepare next round
int temp = d_bright_time[idx];
if (row >= 0 && row < (h_num - 1))
{
d_bright_time[idx + w_num] = temp;
}
if (row == 0)
{
if (d_bright_time[idx] > 0)
{
d_bright_time[idx] = d_bright_time[idx] - 1;
}
}
// set occupancy of every position
if (d_bright_time[idx] > 0)
{
d_not_empty[col] = true;
}
if (d_not_empty[col] == true)
{
d_pos_occu[col] = true;
}
else
{
d_pos_occu[col] = false;
}
}
}
void RainFall(uint8_t *yuv)
{
static bool pos_occu[w_num] = { false };
static int bright_time[w_num * h_num] = { 0 };
static int char_id[w_num * h_num];
static Character character_i[w_num * h_num];
for (int i = 0; i < (w_num * h_num); i++)
{
char_id[i] = rand() % 10;
Character character_temp(char_id[i]);
character_i[i] = character_temp;
}
int blockNum = ((w_num * h_num + 1) / 512) + 1;
Rain *rain_i = new Rain[2];
for (int i = 0; i < 2; i++)
{
if (pos_occu[rain_i[i].pos] == false)
{
bright_time[rain_i[i].pos] = rain_i[i].leng;
}
}
bool *d_pos_occu;
int *d_bright_time;
bool *d_not_empty;
Character *d_character_i;
cudaMalloc(&d_pos_occu, w_num * sizeof(bool));
cudaMalloc(&d_bright_time, w_num * h_num * sizeof(int));
cudaMalloc(&d_not_empty, w_num * sizeof(bool));
cudaMalloc(&d_character_i, w_num * h_num * sizeof(Character));
cudaMemset(d_pos_occu, false, w_num * sizeof(bool));
cudaMemset(d_bright_time, 0, w_num * h_num * sizeof(int));
cudaMemset(d_not_empty, false, w_num * sizeof(bool));
cudaMemcpy(d_pos_occu, pos_occu, w_num * sizeof(bool), cudaMemcpyHostToDevice);
cudaMemcpy(d_bright_time, bright_time, w_num * h_num * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_character_i, character_i, w_num * h_num * sizeof(Character), cudaMemcpyHostToDevice);
render<<<blockNum, 512>>>(yuv, d_pos_occu, d_bright_time, d_not_empty, d_character_i);
cudaMemcpy(pos_occu, d_pos_occu, w_num * sizeof(bool), cudaMemcpyDeviceToHost);
cudaMemcpy(bright_time, d_bright_time, w_num * h_num * sizeof(int), cudaMemcpyDeviceToHost);
delete[] rain_i; // rain_i is allocated with new[] on every call; release it
cudaFree(d_pos_occu);
cudaFree(d_bright_time);
cudaFree(d_not_empty);
cudaFree(d_character_i);
} |
f6cca8edc16f811480bd58b8f38bf5a270edd3cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_b;
int xdim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_b;
int ydim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_b;
int xdim1_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_b;
int ydim1_update_halo_kernel4_plus_2_b_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_2_b * (y) + \
xdim0_update_halo_kernel4_plus_2_b * ydim0_update_halo_kernel4_plus_2_b * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_2_b * (y) + \
xdim1_update_halo_kernel4_plus_2_b * ydim1_update_halo_kernel4_plus_2_b * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_2_b(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_b(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b *
ydim0_update_halo_kernel4_plus_2_b;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b *
ydim1_update_halo_kernel4_plus_2_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_b(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 124))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(124, "update_halo_kernel4_plus_2_b");
OPS_kernels[124].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_b_h ||
ydim0 != ydim0_update_halo_kernel4_plus_2_b_h ||
xdim1 != xdim1_update_halo_kernel4_plus_2_b_h ||
ydim1 != ydim1_update_halo_kernel4_plus_2_b_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_b, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_2_b_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_b, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_2_b_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_b, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_2_b_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_b, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_2_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[124].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_b), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[124].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[124].mpi_time += t2 - t1;
OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| f6cca8edc16f811480bd58b8f38bf5a270edd3cc.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_b;
int xdim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_b;
int ydim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_b;
int xdim1_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_b;
int ydim1_update_halo_kernel4_plus_2_b_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_2_b * (y) + \
xdim0_update_halo_kernel4_plus_2_b * ydim0_update_halo_kernel4_plus_2_b * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_2_b * (y) + \
xdim1_update_halo_kernel4_plus_2_b * ydim1_update_halo_kernel4_plus_2_b * \
(z))
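// The OPS_ACCn macros flatten a relative 3D offset (x, y, z) into a 1D index:
// idx = x + xdim * y + xdim * ydim * z, using the dimensions of dataset n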
// user function
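// for each flux field whose flag is set, the value at the current point is
// overwritten with the value two cells away in the first (x) index direction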
__device__
inline void
update_halo_kernel4_plus_2_b(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_b(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b *
ydim0_update_halo_kernel4_plus_2_b;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b *
ydim1_update_halo_kernel4_plus_2_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_b(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 124))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(124, "update_halo_kernel4_plus_2_b");
OPS_kernels[124].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_b_h ||
ydim0 != ydim0_update_halo_kernel4_plus_2_b_h ||
xdim1 != xdim1_update_halo_kernel4_plus_2_b_h ||
ydim1 != ydim1_update_halo_kernel4_plus_2_b_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_b, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_2_b_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_b, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_2_b_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_b, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_2_b_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_b, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_2_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[124].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel4_plus_2_b<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[124].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[124].mpi_time += t2 - t1;
OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
1cd4177b5b5666fc722aa027a65b0882b6eba0ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Program to implement a SAT solver using the DPLL algorithm with unit
* propagation Sukrut Rao CS15BTECH11036
*/
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <string>
#include <fstream>
#include <vector>
using namespace std;
/*
* enum for different types of return flags defined
*/
enum Cat {
satisfied, // when a satisfying assignment has been found
unsatisfied, // when no satisfying assignment has been found after
// exhaustively searching
normal, // when no satisfying assignment has been found till now, and DPLL()
// has exited normally
completed // when the DPLL algorithm has completed execution
};
/*
* class to represent a boolean formula
*/
class Formula {
public:
// a vector that stores the value assigned to each variable, where
// -1 - unassigned
// 0 - true
// 1 - false
vector < int > literals;
vector < int > literal_frequency; // vector to store the number of occurrences of
// each literal
// vector to store the difference in number of occurrences with
// positive and negative polarity of each literal
vector < int > literal_polarity;
// vector to store the clauses
  // for each clause, if variable n appears with positive polarity, 2n is stored;
  // if it appears with negative polarity, 2n+1 is stored; n is zero-indexed
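  // e.g. DIMACS literals 1 and -3 (internally variables 0 and 2) are stored as
  // 2*0 = 0 and 2*2+1 = 5 respectively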
vector < vector < int >> clauses;
Formula() {}
// copy constructor for copying a formula - each member is copied over
Formula(const Formula & f) {
literals = f.literals;
clauses = f.clauses;
literal_frequency = f.literal_frequency;
literal_polarity = f.literal_polarity;
}
};
/*
* class to represent the structure and functions of the SAT Solver
*/
class SATSolverDPLL {
private:
Formula formula; // the initial formula given as input
int literal_count; // the number of variables in the formula
int clause_count; // the number of clauses in the formula
int unit_propagate(Formula & ); // performs unit propagation
int DPLL(Formula); // performs DPLL recursively
int apply_transform(Formula & ,
int); // applies the value of the literal in every clause
void show_result(Formula & , int); // displays the result
public:
SATSolverDPLL() {}
  void initialize(std::string); // initializes the values
void solve(); // calls the solver
};
/*
* function that accepts the inputs from the user and initializes the attributes
* in the solver
*/
void SATSolverDPLL::initialize(std::string filename) {
char c; // store first character
string s; // dummy string
std::ifstream in(filename);
  std::streambuf *cinbuf = std::cin.rdbuf(); // save the old stream buffer
  std::cin.rdbuf(in.rdbuf()); // redirect std::cin to read from the input file
while (true) {
cin >> c;
if (c == 'c') // if comment
{
getline(cin, s); // ignore
    } else // otherwise this is the 'p' (problem) line
{
cin >> s; // this would be cnf
break;
}
}
cin >> literal_count;
cin >> clause_count;
// set the vectors to their appropriate sizes and initial values
formula.literals.clear();
formula.literals.resize(literal_count, -1);
formula.clauses.clear();
formula.clauses.resize(clause_count);
formula.literal_frequency.clear();
formula.literal_frequency.resize(literal_count, 0);
formula.literal_polarity.clear();
formula.literal_polarity.resize(literal_count, 0);
int literal; // store the incoming literal value
// iterate over the clauses
for (int i = 0; i < clause_count; i++) {
while (true) // while the ith clause gets more literals
{
cin >> literal;
if (literal > 0) // if the variable has positive polarity
{
formula.clauses[i].push_back(2 *
(literal - 1)); // store it in the form 2n
// increment frequency and polarity of the literal
formula.literal_frequency[literal - 1]++;
formula.literal_polarity[literal - 1]++;
} else if (literal < 0) // if the variable has negative polarity
{
formula.clauses[i].push_back(2 * ((-1) * literal - 1) +
1); // store it in the form 2n+1
// increment frequency and decrement polarity of the literal
formula.literal_frequency[-1 - literal]++;
formula.literal_polarity[-1 - literal]--;
} else {
break; // read 0, so move to next clause
}
}
}
}
/*
* function to perform unit resolution in a given formula
* arguments: f - the formula to perform unit resolution on
* return value: int - the status of the solver after unit resolution, a member
* of the Cat enum Cat::satisfied - the formula has been satisfied
* Cat::unsatisfied - the formula can no longer be satisfied
* Cat::normal - normal exit
*/
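// The kernel below performs one step of unit propagation on the device: each
// block handles a single clause; if that clause has exactly one literal left,
// the corresponding variable is assigned the value that satisfies it
// (clauses[block] % 2), its frequency is reset to -1, and the encoded literal
// is reported back through ret_literals (0 means "not a unit clause").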
__global__ void compute(int * ret_literals, int * literals, int * clauses, int * clauses_size, int * literal_frequency) {
int block = blockIdx.x;
if (clauses_size[block] == 1) {
literals[clauses[block] / 2] = clauses[block] % 2;
literal_frequency[clauses[block] / 2] = -1; // once assigned, reset the frequency to mark it closed
ret_literals[block] = clauses[block];
} else {
ret_literals[block] = 0;
}
}
int SATSolverDPLL::unit_propagate(Formula & f) {
bool unit_clause_found =
false; // stores whether the current iteration found a unit clause
if (f.clauses.size() == 0) // if the formula contains no clauses
{
return Cat::satisfied; // it is vacuously satisfied
}
int * ret_literals, * literals, * clauses, * clauses_size, * literal_frequency;
do {
unit_clause_found = false;
hipMalloc((void ** ) & ret_literals, sizeof(int) * f.clauses.size());
hipMalloc((void ** ) & literals, sizeof(int) * f.literals.size());
hipMalloc((void ** ) & clauses_size, sizeof(int) * f.clauses.size());
hipMalloc((void ** ) & clauses, sizeof(int) * f.clauses.size());
hipMalloc((void ** ) & literal_frequency, sizeof(int) * f.literal_frequency.size());
hipMemcpy(literals, & f.literals[0], sizeof(int) * f.literals.size(), hipMemcpyHostToDevice);
hipMemcpy(literal_frequency, & f.literal_frequency[0], sizeof(int) * f.literal_frequency.size(), hipMemcpyHostToDevice);
int hclauses_size[f.clauses.size()];
int hclauses[f.clauses.size()];
int hret_literals[f.clauses.size()];
for (int i = 0; i < f.clauses.size(); i++) {
hclauses_size[i] = f.clauses[i].size();
if (hclauses_size[i] == 0) {
hipFree(ret_literals);
hipFree(literals);
hipFree(clauses_size);
hipFree(clauses);
hipFree(literal_frequency);
return Cat::unsatisfied;
}
hclauses[i] = f.clauses[i][0];
}
hipMemcpy(clauses_size, & hclauses_size, sizeof(int) * f.clauses.size(), hipMemcpyHostToDevice);
hipMemcpy(clauses, & hclauses, sizeof(int) * f.clauses.size(), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(compute, dim3(f.clauses.size()), dim3(1), 0, 0, ret_literals, literals, clauses, clauses_size, literal_frequency);
hipMemcpy( & hret_literals, ret_literals, sizeof(int) * f.clauses.size(), hipMemcpyDeviceToHost);
hipMemcpy( & f.literals[0], literals, sizeof(int) * f.literals.size(), hipMemcpyDeviceToHost);
hipMemcpy( & f.literal_frequency[0], literal_frequency, sizeof(int) * f.literal_frequency.size(), hipMemcpyDeviceToHost);
for(int i = 0; i < f.clauses.size(); i++)
{
if(hret_literals[i] == 0)
{
continue;
}
else
{
unit_clause_found = true;
int result = apply_transform(f, hret_literals[i] / 2);
if (result == Cat::satisfied || result == Cat::unsatisfied)
{
return result;
}
}
}
hipFree(ret_literals);
hipFree(literals);
hipFree(clauses_size);
hipFree(clauses);
hipFree(literal_frequency);
} while (unit_clause_found);
return Cat::normal; // if reached here, the unit resolution ended normally
}
/*
* applies a value of a literal to all clauses in a given formula
* arguments: f - the formula to apply on
* literal_to_apply - the literal which has just been set
* return value: int - the return status flag, a member of the Cat enum
* Cat::satisfied - the formula has been satisfied
* Cat::unsatisfied - the formula can no longer be satisfied
* Cat::normal - normal exit
*/
int SATSolverDPLL::apply_transform(Formula & f, int literal_to_apply) {
int value_to_apply = f.literals[literal_to_apply]; // the value to apply, 0 -
// if true, 1 - if false
// iterate over the clauses in f
for (int i = 0; i < f.clauses.size(); i++) {
// iterate over the variables in the clause
for (int j = 0; j < f.clauses[i].size(); j++) {
// if this is true, then the literal appears with the same polarity as it
// is being applied that is, if assigned true, it appears positive if
// assigned false, it appears negative, in this clause hence, the clause
// has now become true
if ((2 * literal_to_apply + value_to_apply) == f.clauses[i][j]) {
f.clauses.erase(f.clauses.begin() +
i); // remove the clause from the list
i--; // reset iterator
if (f.clauses.size() ==
0) // if all clauses have been removed, the formula is satisfied
{
return Cat::satisfied;
}
break; // move to the next clause
} else if (f.clauses[i][j] / 2 ==
literal_to_apply) // the literal appears with opposite polarity
{
f.clauses[i].erase(
f.clauses[i].begin() +
j); // remove the literal from the clause, as it is false in it
j--; // reset the iterator
if (f.clauses[i].size() ==
0) // if the clause is empty, the formula is unsatisfiable currently
{
return Cat::unsatisfied;
}
break; // move to the next clause
}
}
}
// if reached here, the function is exiting normally
return Cat::normal;
}
/*
* function to perform the recursive DPLL on a given formula
* argument: f - the formula to perform DPLL on
* return value: int - the return status flag, a member of the Cat enum
* Cat::normal - exited normally
* Cat::completed - result has been found, exit recursion all the
* way
*/
int SATSolverDPLL::DPLL(Formula f) {
int result = unit_propagate(f); // perform unit propagation on the formula
if (result == Cat::satisfied) // if formula satisfied, show result and return
{
show_result(f, result);
return Cat::completed;
} else if (result == Cat::unsatisfied) // if formula not satisfied in this
// branch, return normally
{
return Cat::normal;
}
// find the variable with maximum frequency in f, which will be the next to be
// assigned a value already assigned variables have this field reset to -1 in
// order to ignore them
int i = distance(
f.literal_frequency.begin(),
max_element(f.literal_frequency.begin(), f.literal_frequency.end()));
// need to apply twice, once true, the other false
for (int j = 0; j < 2; j++) {
Formula new_f = f; // copy the formula before recursing
if (new_f.literal_polarity[i] >
0) // if the number of literals with positive polarity are greater
{
new_f.literals[i] = j; // assign positive first
} else // if not
{
new_f.literals[i] = (j + 1) % 2; // assign negative first
}
new_f.literal_frequency[i] = -1; // reset the frequency to -1 to ignore in the future
int transform_result =
apply_transform(new_f, i); // apply the change to all the clauses
if (transform_result ==
Cat::satisfied) // if formula satisfied, show result and return
{
show_result(new_f, transform_result);
return Cat::completed;
} else if (transform_result == Cat::unsatisfied) // if formula not satisfied
// in this branch, return
// normally
{
continue;
}
int dpll_result = DPLL(new_f); // recursively call DPLL on the new formula
if (dpll_result == Cat::completed) // propagate the result, if completed
{
return dpll_result;
}
}
// if the control reaches here, the function has returned normally
return Cat::normal;
}
/*
* function to display the result of the solver
* arguments: f - the formula when it was satisfied or shown to be unsatisfiable
* result - the result flag, a member of the Cat enum
*/
void SATSolverDPLL::show_result(Formula & f, int result) {
if (result == Cat::satisfied) // if the formula is satisfiable
{
cout << "SAT" << endl;
for (int i = 0; i < f.literals.size(); i++) {
if (i != 0) {
cout << " ";
}
if (f.literals[i] != -1) {
cout << pow(-1, f.literals[i]) * (i + 1);
} else // for literals which can take either value, arbitrarily assign
// them to be true
{
cout << (i + 1);
}
}
cout << " 0";
} else // if the formula is unsatisfiable
{
cout << "UNSAT";
}
}
/*
* function to call the solver
*/
void SATSolverDPLL::solve() {
int result = DPLL(formula); // final result of DPLL on the original formula
// if normal return till the end, then the formula could not be satisfied in
// any branch, so it is unsatisfiable
if (result == Cat::normal) {
show_result(formula, Cat::unsatisfied); // the argument formula is a dummy
// here, the result is UNSAT
}
}
int main() {
SATSolverDPLL solver; // create the solver
solver.initialize("problem1.cnf"); // initialize
solver.solve(); // solve
return 0;
} | 1cd4177b5b5666fc722aa027a65b0882b6eba0ea.cu | /*
* Program to implement a SAT solver using the DPLL algorithm with unit
* propagation Sukrut Rao CS15BTECH11036
*/
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <string>
#include <fstream>
#include <vector>
using namespace std;
/*
* enum for different types of return flags defined
*/
enum Cat {
satisfied, // when a satisfying assignment has been found
unsatisfied, // when no satisfying assignment has been found after
// exhaustively searching
normal, // when no satisfying assignment has been found till now, and DPLL()
// has exited normally
completed // when the DPLL algorithm has completed execution
};
/*
* class to represent a boolean formula
*/
class Formula {
public:
// a vector that stores the value assigned to each variable, where
// -1 - unassigned
// 0 - true
// 1 - false
vector < int > literals;
vector < int > literal_frequency; // vector to store the number of occurrences of
// each literal
// vector to store the difference in number of occurrences with
// positive and negative polarity of each literal
vector < int > literal_polarity;
// vector to store the clauses
  // for each clause, if variable n appears with positive polarity, 2n is stored;
  // if it appears with negative polarity, 2n+1 is stored; n is zero-indexed
vector < vector < int >> clauses;
Formula() {}
// copy constructor for copying a formula - each member is copied over
Formula(const Formula & f) {
literals = f.literals;
clauses = f.clauses;
literal_frequency = f.literal_frequency;
literal_polarity = f.literal_polarity;
}
};
/*
* class to represent the structure and functions of the SAT Solver
*/
class SATSolverDPLL {
private:
Formula formula; // the initial formula given as input
int literal_count; // the number of variables in the formula
int clause_count; // the number of clauses in the formula
int unit_propagate(Formula & ); // performs unit propagation
int DPLL(Formula); // performs DPLL recursively
int apply_transform(Formula & ,
int); // applies the value of the literal in every clause
void show_result(Formula & , int); // displays the result
public:
SATSolverDPLL() {}
  void initialize(std::string); // initializes the values
void solve(); // calls the solver
};
/*
* function that accepts the inputs from the user and initializes the attributes
* in the solver
*/
void SATSolverDPLL::initialize(std::string filename) {
char c; // store first character
string s; // dummy string
std::ifstream in(filename);
  std::streambuf *cinbuf = std::cin.rdbuf(); // save the old stream buffer
  std::cin.rdbuf(in.rdbuf()); // redirect std::cin to read from the input file
while (true) {
cin >> c;
if (c == 'c') // if comment
{
getline(cin, s); // ignore
    } else // otherwise this is the 'p' (problem) line
{
cin >> s; // this would be cnf
break;
}
}
cin >> literal_count;
cin >> clause_count;
// set the vectors to their appropriate sizes and initial values
formula.literals.clear();
formula.literals.resize(literal_count, -1);
formula.clauses.clear();
formula.clauses.resize(clause_count);
formula.literal_frequency.clear();
formula.literal_frequency.resize(literal_count, 0);
formula.literal_polarity.clear();
formula.literal_polarity.resize(literal_count, 0);
int literal; // store the incoming literal value
// iterate over the clauses
for (int i = 0; i < clause_count; i++) {
while (true) // while the ith clause gets more literals
{
cin >> literal;
if (literal > 0) // if the variable has positive polarity
{
formula.clauses[i].push_back(2 *
(literal - 1)); // store it in the form 2n
// increment frequency and polarity of the literal
formula.literal_frequency[literal - 1]++;
formula.literal_polarity[literal - 1]++;
} else if (literal < 0) // if the variable has negative polarity
{
formula.clauses[i].push_back(2 * ((-1) * literal - 1) +
1); // store it in the form 2n+1
// increment frequency and decrement polarity of the literal
formula.literal_frequency[-1 - literal]++;
formula.literal_polarity[-1 - literal]--;
} else {
break; // read 0, so move to next clause
}
}
}
}
/*
* function to perform unit resolution in a given formula
* arguments: f - the formula to perform unit resolution on
* return value: int - the status of the solver after unit resolution, a member
* of the Cat enum Cat::satisfied - the formula has been satisfied
* Cat::unsatisfied - the formula can no longer be satisfied
* Cat::normal - normal exit
*/
__global__ void compute(int * ret_literals, int * literals, int * clauses, int * clauses_size, int * literal_frequency) {
int block = blockIdx.x;
if (clauses_size[block] == 1) {
literals[clauses[block] / 2] = clauses[block] % 2;
literal_frequency[clauses[block] / 2] = -1; // once assigned, reset the frequency to mark it closed
ret_literals[block] = clauses[block];
} else {
ret_literals[block] = 0;
}
}
int SATSolverDPLL::unit_propagate(Formula & f) {
bool unit_clause_found =
false; // stores whether the current iteration found a unit clause
if (f.clauses.size() == 0) // if the formula contains no clauses
{
return Cat::satisfied; // it is vacuously satisfied
}
int * ret_literals, * literals, * clauses, * clauses_size, * literal_frequency;
do {
unit_clause_found = false;
cudaMalloc((void ** ) & ret_literals, sizeof(int) * f.clauses.size());
cudaMalloc((void ** ) & literals, sizeof(int) * f.literals.size());
cudaMalloc((void ** ) & clauses_size, sizeof(int) * f.clauses.size());
cudaMalloc((void ** ) & clauses, sizeof(int) * f.clauses.size());
cudaMalloc((void ** ) & literal_frequency, sizeof(int) * f.literal_frequency.size());
cudaMemcpy(literals, & f.literals[0], sizeof(int) * f.literals.size(), cudaMemcpyHostToDevice);
cudaMemcpy(literal_frequency, & f.literal_frequency[0], sizeof(int) * f.literal_frequency.size(), cudaMemcpyHostToDevice);
int hclauses_size[f.clauses.size()];
int hclauses[f.clauses.size()];
int hret_literals[f.clauses.size()];
for (int i = 0; i < f.clauses.size(); i++) {
hclauses_size[i] = f.clauses[i].size();
if (hclauses_size[i] == 0) {
cudaFree(ret_literals);
cudaFree(literals);
cudaFree(clauses_size);
cudaFree(clauses);
cudaFree(literal_frequency);
return Cat::unsatisfied;
}
hclauses[i] = f.clauses[i][0];
}
cudaMemcpy(clauses_size, & hclauses_size, sizeof(int) * f.clauses.size(), cudaMemcpyHostToDevice);
cudaMemcpy(clauses, & hclauses, sizeof(int) * f.clauses.size(), cudaMemcpyHostToDevice);
    compute<<<f.clauses.size(), 1>>>(ret_literals, literals, clauses, clauses_size, literal_frequency);
cudaMemcpy( & hret_literals, ret_literals, sizeof(int) * f.clauses.size(), cudaMemcpyDeviceToHost);
cudaMemcpy( & f.literals[0], literals, sizeof(int) * f.literals.size(), cudaMemcpyDeviceToHost);
cudaMemcpy( & f.literal_frequency[0], literal_frequency, sizeof(int) * f.literal_frequency.size(), cudaMemcpyDeviceToHost);
for(int i = 0; i < f.clauses.size(); i++)
{
if(hret_literals[i] == 0)
{
continue;
}
else
{
unit_clause_found = true;
int result = apply_transform(f, hret_literals[i] / 2);
if (result == Cat::satisfied || result == Cat::unsatisfied)
{
return result;
}
}
}
cudaFree(ret_literals);
cudaFree(literals);
cudaFree(clauses_size);
cudaFree(clauses);
cudaFree(literal_frequency);
} while (unit_clause_found);
return Cat::normal; // if reached here, the unit resolution ended normally
}
/*
* applies a value of a literal to all clauses in a given formula
* arguments: f - the formula to apply on
* literal_to_apply - the literal which has just been set
* return value: int - the return status flag, a member of the Cat enum
* Cat::satisfied - the formula has been satisfied
* Cat::unsatisfied - the formula can no longer be satisfied
* Cat::normal - normal exit
*/
int SATSolverDPLL::apply_transform(Formula & f, int literal_to_apply) {
int value_to_apply = f.literals[literal_to_apply]; // the value to apply, 0 -
// if true, 1 - if false
// iterate over the clauses in f
for (int i = 0; i < f.clauses.size(); i++) {
// iterate over the variables in the clause
for (int j = 0; j < f.clauses[i].size(); j++) {
// if this is true, then the literal appears with the same polarity as it
// is being applied that is, if assigned true, it appears positive if
// assigned false, it appears negative, in this clause hence, the clause
// has now become true
if ((2 * literal_to_apply + value_to_apply) == f.clauses[i][j]) {
f.clauses.erase(f.clauses.begin() +
i); // remove the clause from the list
i--; // reset iterator
if (f.clauses.size() ==
0) // if all clauses have been removed, the formula is satisfied
{
return Cat::satisfied;
}
break; // move to the next clause
} else if (f.clauses[i][j] / 2 ==
literal_to_apply) // the literal appears with opposite polarity
{
f.clauses[i].erase(
f.clauses[i].begin() +
j); // remove the literal from the clause, as it is false in it
j--; // reset the iterator
if (f.clauses[i].size() ==
0) // if the clause is empty, the formula is unsatisfiable currently
{
return Cat::unsatisfied;
}
break; // move to the next clause
}
}
}
// if reached here, the function is exiting normally
return Cat::normal;
}
/*
* function to perform the recursive DPLL on a given formula
* argument: f - the formula to perform DPLL on
* return value: int - the return status flag, a member of the Cat enum
* Cat::normal - exited normally
* Cat::completed - result has been found, exit recursion all the
* way
*/
int SATSolverDPLL::DPLL(Formula f) {
int result = unit_propagate(f); // perform unit propagation on the formula
if (result == Cat::satisfied) // if formula satisfied, show result and return
{
show_result(f, result);
return Cat::completed;
} else if (result == Cat::unsatisfied) // if formula not satisfied in this
// branch, return normally
{
return Cat::normal;
}
// find the variable with maximum frequency in f, which will be the next to be
// assigned a value already assigned variables have this field reset to -1 in
// order to ignore them
int i = distance(
f.literal_frequency.begin(),
max_element(f.literal_frequency.begin(), f.literal_frequency.end()));
// need to apply twice, once true, the other false
for (int j = 0; j < 2; j++) {
Formula new_f = f; // copy the formula before recursing
if (new_f.literal_polarity[i] >
0) // if the number of literals with positive polarity are greater
{
new_f.literals[i] = j; // assign positive first
} else // if not
{
new_f.literals[i] = (j + 1) % 2; // assign negative first
}
new_f.literal_frequency[i] = -1; // reset the frequency to -1 to ignore in the future
int transform_result =
apply_transform(new_f, i); // apply the change to all the clauses
if (transform_result ==
Cat::satisfied) // if formula satisfied, show result and return
{
show_result(new_f, transform_result);
return Cat::completed;
} else if (transform_result == Cat::unsatisfied) // if formula not satisfied
// in this branch, return
// normally
{
continue;
}
int dpll_result = DPLL(new_f); // recursively call DPLL on the new formula
if (dpll_result == Cat::completed) // propagate the result, if completed
{
return dpll_result;
}
}
// if the control reaches here, the function has returned normally
return Cat::normal;
}
/*
* function to display the result of the solver
* arguments: f - the formula when it was satisfied or shown to be unsatisfiable
* result - the result flag, a member of the Cat enum
*/
void SATSolverDPLL::show_result(Formula & f, int result) {
if (result == Cat::satisfied) // if the formula is satisfiable
{
cout << "SAT" << endl;
for (int i = 0; i < f.literals.size(); i++) {
if (i != 0) {
cout << " ";
}
if (f.literals[i] != -1) {
cout << pow(-1, f.literals[i]) * (i + 1);
} else // for literals which can take either value, arbitrarily assign
// them to be true
{
cout << (i + 1);
}
}
cout << " 0";
} else // if the formula is unsatisfiable
{
cout << "UNSAT";
}
}
/*
* function to call the solver
*/
void SATSolverDPLL::solve() {
int result = DPLL(formula); // final result of DPLL on the original formula
// if normal return till the end, then the formula could not be satisfied in
// any branch, so it is unsatisfiable
if (result == Cat::normal) {
show_result(formula, Cat::unsatisfied); // the argument formula is a dummy
// here, the result is UNSAT
}
}
int main() {
SATSolverDPLL solver; // create the solver
solver.initialize("problem1.cnf"); // initialize
solver.solve(); // solve
return 0;
} |
f3cae2c0a525fcf7a1123414f25f81220831820b.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 XGBoost contributors
*/
#include <vector>
#include <algorithm>
#include <utility>
#include <dmlc/omp.h>
#include <dmlc/timer.h>
#include "xgboost/logging.h"
#include "xgboost/objective.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "../common/math.h"
#include "../common/random.h"
#if defined(__HIPCC__)
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/random/linear_congruential_engine.h>
#include <hipcub/hipcub.hpp>
#include "../common/device_helpers.cuh"
#endif
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA) && !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(rank_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> {
size_t num_pairsample;
float fix_list_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(LambdaRankParam) {
DMLC_DECLARE_FIELD(num_pairsample).set_lower_bound(1).set_default(1)
.describe("Number of pair generated for each instance.");
DMLC_DECLARE_FIELD(fix_list_weight).set_lower_bound(0.0f).set_default(0.0f)
.describe("Normalize the weight of each list by this value,"
" if equals 0, no effect will happen");
}
};
#if defined(__HIPCC__)
// Helper functions
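// The helpers below assume 'items' is sorted in descending order:
// CountNumItemsToTheLeftOf returns the number of entries strictly greater than v
// (i.e. located before v), CountNumItemsToTheRightOf the number strictly smaller
// than v (i.e. located after it).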
template <typename T>
XGBOOST_DEVICE __forceinline__ uint32_t
CountNumItemsToTheLeftOf(const T *__restrict__ items, uint32_t n, T v) {
return thrust::lower_bound(thrust::seq, items, items + n, v,
thrust::greater<T>()) -
items;
}
template <typename T>
XGBOOST_DEVICE __forceinline__ uint32_t
CountNumItemsToTheRightOf(const T *__restrict__ items, uint32_t n, T v) {
return n - (thrust::upper_bound(thrust::seq, items, items + n, v,
thrust::greater<T>()) -
items);
}
#endif
/*! \brief helper information in a list */
struct ListEntry {
/*! \brief the predict score we in the data */
bst_float pred;
/*! \brief the actual label of the entry */
bst_float label;
/*! \brief row index in the data matrix */
unsigned rindex;
// constructor
ListEntry(bst_float pred, bst_float label, unsigned rindex)
: pred(pred), label(label), rindex(rindex) {}
// comparator by prediction
inline static bool CmpPred(const ListEntry &a, const ListEntry &b) {
return a.pred > b.pred;
}
// comparator by label
inline static bool CmpLabel(const ListEntry &a, const ListEntry &b) {
return a.label > b.label;
}
};
/*! \brief a pair in the lambda rank */
struct LambdaPair {
/*! \brief positive index: this is a position in the list */
unsigned pos_index;
/*! \brief negative index: this is a position in the list */
unsigned neg_index;
/*! \brief weight to be filled in */
bst_float weight;
// constructor
LambdaPair(unsigned pos_index, unsigned neg_index)
: pos_index(pos_index), neg_index(neg_index), weight(1.0f) {}
// constructor
LambdaPair(unsigned pos_index, unsigned neg_index, bst_float weight)
: pos_index(pos_index), neg_index(neg_index), weight(weight) {}
};
class PairwiseLambdaWeightComputer {
public:
/*!
* \brief get lambda weight for existing pairs - for pairwise objective
* \param list a list that is sorted by pred score
* \param io_pairs record of pairs, containing the pairs to fill in weights
*/
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {}
static char const* Name() {
return "rank:pairwise";
}
#if defined(__HIPCC__)
PairwiseLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter) {}
class PairwiseLambdaWeightMultiplier {
public:
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
return 1.0f;
}
};
inline const PairwiseLambdaWeightMultiplier GetWeightMultiplier() const {
return {};
}
#endif
};
#if defined(__HIPCC__)
class BaseLambdaWeightMultiplier {
public:
BaseLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const dh::SegmentSorter<float> &segment_pred_sorter)
: dsorted_labels_(segment_label_sorter.GetItemsSpan()),
dorig_pos_(segment_label_sorter.GetOriginalPositionsSpan()),
dgroups_(segment_label_sorter.GetGroupsSpan()),
dindexable_sorted_preds_pos_(segment_pred_sorter.GetIndexableSortedPositionsSpan()) {}
protected:
const common::Span<const float> dsorted_labels_; // Labels sorted within a group
const common::Span<const uint32_t> dorig_pos_; // Original indices of the labels
// before they are sorted
const common::Span<const uint32_t> dgroups_; // The group indices
// Where can a prediction for a label be found in the original array, when they are sorted
const common::Span<const uint32_t> dindexable_sorted_preds_pos_;
};
// While computing the weight that needs to be adjusted by this ranking objective, we need
// to figure out where positive and negative labels chosen earlier exist, if the group
// were to be sorted by its predictions. To accommodate this, we employ the following algorithm.
// For a given group, let's assume the following:
// labels: 1 5 9 2 4 8 0 7 6 3
// predictions: 1 9 0 8 2 7 3 6 5 4
// position: 0 1 2 3 4 5 6 7 8 9
//
// After label sort:
// labels: 9 8 7 6 5 4 3 2 1 0
// position: 2 5 7 8 1 4 9 3 0 6
//
// After prediction sort:
// predictions: 9 8 7 6 5 4 3 2 1 0
// position: 1 3 5 7 8 9 6 4 0 2
//
// If a sorted label at position 'x' is chosen, then we need to find out where the prediction
// for this label 'x' exists, if the group were to be sorted by predictions.
// We first take the sorted prediction positions:
// position: 1 3 5 7 8 9 6 4 0 2
// at indices: 0 1 2 3 4 5 6 7 8 9
//
// We create a sorted prediction positional array, such that value at position 'x' gives
// us the position in the sorted prediction array where its related prediction lies.
// dindexable_sorted_preds_pos_: 8 0 9 1 7 2 6 3 4 5
// at indices: 0 1 2 3 4 5 6 7 8 9
// Basically, swap the previous 2 arrays, sort the indices and reorder positions
// for an O(1) lookup using the position where the sorted label exists.
//
// This type does that using the SegmentSorter
class IndexablePredictionSorter {
public:
IndexablePredictionSorter(const bst_float *dpreds,
const dh::SegmentSorter<float> &segment_label_sorter) {
// Sort the predictions first
segment_pred_sorter_.SortItems(dpreds, segment_label_sorter.GetNumItems(),
segment_label_sorter.GetGroupSegmentsSpan());
// Create an index for the sorted prediction positions
segment_pred_sorter_.CreateIndexableSortedPositions();
}
inline const dh::SegmentSorter<float> &GetPredictionSorter() const {
return segment_pred_sorter_;
}
private:
dh::SegmentSorter<float> segment_pred_sorter_; // For sorting the predictions
};
#endif
// beta version: NDCG lambda rank
class NDCGLambdaWeightComputer
#if defined(__HIPCC__)
: public IndexablePredictionSorter
#endif
{
public:
#if defined(__HIPCC__)
// This function object computes the item's DCG value
class ComputeItemDCG : public thrust::unary_function<uint32_t, float> {
public:
XGBOOST_DEVICE ComputeItemDCG(const common::Span<const float> &dsorted_labels,
const common::Span<const uint32_t> &dgroups,
const common::Span<const uint32_t> &gidxs)
: dsorted_labels_(dsorted_labels),
dgroups_(dgroups),
dgidxs_(gidxs) {}
// Compute DCG for the item at 'idx'
__device__ __forceinline__ float operator()(uint32_t idx) const {
return ComputeItemDCGWeight(dsorted_labels_[idx], idx - dgroups_[dgidxs_[idx]]);
}
private:
const common::Span<const float> dsorted_labels_; // Labels sorted within a group
const common::Span<const uint32_t> dgroups_; // The group indices - where each group
// begins and ends
const common::Span<const uint32_t> dgidxs_; // The group each items belongs to
};
// Type containing device pointers that can be cheaply copied on the kernel
class NDCGLambdaWeightMultiplier : public BaseLambdaWeightMultiplier {
public:
NDCGLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const NDCGLambdaWeightComputer &lwc)
: BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()),
dgroup_dcgs_(lwc.GetGroupDcgsSpan()) {}
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
if (dgroup_dcgs_[gidx] == 0.0) return 0.0f;
uint32_t group_begin = dgroups_[gidx];
auto pos_lab_orig_posn = dorig_pos_[pidx];
auto neg_lab_orig_posn = dorig_pos_[nidx];
KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn);
// Note: the label positive and negative indices are relative to the entire dataset.
// Hence, scale them back to an index within the group
auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin;
auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin;
return NDCGLambdaWeightComputer::ComputeDeltaWeight(
pos_pred_pos, neg_pred_pos,
static_cast<int>(dsorted_labels_[pidx]), static_cast<int>(dsorted_labels_[nidx]),
dgroup_dcgs_[gidx]);
}
private:
const common::Span<const float> dgroup_dcgs_; // Group DCG values
};
NDCGLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter)
: IndexablePredictionSorter(dpreds, segment_label_sorter),
dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f),
weight_multiplier_(segment_label_sorter, *this) {
const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan();
// Allocator to be used for managing space overhead while performing transformed reductions
dh::XGBCachingDeviceAllocator<char> alloc;
// Compute each elements DCG values and reduce them across groups concurrently.
auto end_range =
thrust::reduce_by_key(thrust::hip::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
thrust::make_transform_iterator(
// The indices need not be sequential within a group, as we care only
// about the sum of items DCG values within a group
dh::tcbegin(segment_label_sorter.GetOriginalPositionsSpan()),
ComputeItemDCG(segment_label_sorter.GetItemsSpan(),
segment_label_sorter.GetGroupsSpan(),
group_segments)),
thrust::make_discard_iterator(), // We don't care for the group indices
dgroup_dcg_.begin()); // Sum of the item's DCG values in the group
CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size());
}
inline const common::Span<const float> GetGroupDcgsSpan() const {
return { dgroup_dcg_.data().get(), dgroup_dcg_.size() };
}
inline const NDCGLambdaWeightMultiplier GetWeightMultiplier() const {
return weight_multiplier_;
}
#endif
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {
std::vector<LambdaPair> &pairs = *io_pairs;
float IDCG; // NOLINT
{
std::vector<bst_float> labels(sorted_list.size());
for (size_t i = 0; i < sorted_list.size(); ++i) {
labels[i] = sorted_list[i].label;
}
std::stable_sort(labels.begin(), labels.end(), std::greater<>());
IDCG = ComputeGroupDCGWeight(&labels[0], labels.size());
}
if (IDCG == 0.0) {
for (auto & pair : pairs) {
pair.weight = 0.0f;
}
} else {
for (auto & pair : pairs) {
unsigned pos_idx = pair.pos_index;
unsigned neg_idx = pair.neg_index;
pair.weight *= ComputeDeltaWeight(pos_idx, neg_idx,
sorted_list[pos_idx].label, sorted_list[neg_idx].label,
IDCG);
}
}
}
static char const* Name() {
return "rank:ndcg";
}
inline static bst_float ComputeGroupDCGWeight(const float *sorted_labels, uint32_t size) {
double sumdcg = 0.0;
for (uint32_t i = 0; i < size; ++i) {
sumdcg += ComputeItemDCGWeight(sorted_labels[i], i);
}
return static_cast<bst_float>(sumdcg);
}
private:
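  // Per-item DCG contribution with exponential gain: (2^label - 1) / log2(idx + 2),
  // where idx is the item's 0-based position within the group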
XGBOOST_DEVICE inline static bst_float ComputeItemDCGWeight(unsigned label, uint32_t idx) {
return (label != 0) ? (((1 << label) - 1) / std::log2(static_cast<bst_float>(idx + 2))) : 0;
}
// Compute the weight adjustment for an item within a group:
// pos_pred_pos => Where does the positive label live, had the list been sorted by prediction
// neg_pred_pos => Where does the negative label live, had the list been sorted by prediction
// pos_label => positive label value from sorted label list
// neg_label => negative label value from sorted label list
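  // The result is the absolute NDCG change caused by swapping the two items:
  // |(2^pos_label - 2^neg_label) * (1/log2(pos_pred_pos + 2) - 1/log2(neg_pred_pos + 2))| / idcg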
XGBOOST_DEVICE inline static bst_float ComputeDeltaWeight(uint32_t pos_pred_pos,
uint32_t neg_pred_pos,
int pos_label, int neg_label,
float idcg) {
float pos_loginv = 1.0f / std::log2(pos_pred_pos + 2.0f);
float neg_loginv = 1.0f / std::log2(neg_pred_pos + 2.0f);
bst_float original = ((1 << pos_label) - 1) * pos_loginv + ((1 << neg_label) - 1) * neg_loginv;
float changed = ((1 << neg_label) - 1) * pos_loginv + ((1 << pos_label) - 1) * neg_loginv;
bst_float delta = (original - changed) * (1.0f / idcg);
if (delta < 0.0f) delta = - delta;
return delta;
}
#if defined(__HIPCC__)
dh::caching_device_vector<float> dgroup_dcg_;
// This computes the adjustment to the weight
const NDCGLambdaWeightMultiplier weight_multiplier_;
#endif
};
class MAPLambdaWeightComputer
#if defined(__HIPCC__)
: public IndexablePredictionSorter
#endif
{
public:
struct MAPStats {
/*! \brief the accumulated precision */
float ap_acc{0.0f};
/*!
* \brief the accumulated precision,
* assuming a positive instance is missing
*/
float ap_acc_miss{0.0f};
/*!
* \brief the accumulated precision,
* assuming that one more positive instance is inserted ahead
*/
float ap_acc_add{0.0f};
/* \brief the accumulated positive instance count */
float hits{0.0f};
XGBOOST_DEVICE MAPStats() {} // NOLINT
XGBOOST_DEVICE MAPStats(float ap_acc, float ap_acc_miss, float ap_acc_add, float hits)
: ap_acc(ap_acc), ap_acc_miss(ap_acc_miss), ap_acc_add(ap_acc_add), hits(hits) {}
// For prefix scan
XGBOOST_DEVICE MAPStats operator +(const MAPStats &v1) const {
return {ap_acc + v1.ap_acc, ap_acc_miss + v1.ap_acc_miss,
ap_acc_add + v1.ap_acc_add, hits + v1.hits};
}
// For test purposes - compare for equality
XGBOOST_DEVICE bool operator ==(const MAPStats &rhs) const {
return ap_acc == rhs.ap_acc && ap_acc_miss == rhs.ap_acc_miss &&
ap_acc_add == rhs.ap_acc_add && hits == rhs.hits;
}
};
private:
template <typename T>
XGBOOST_DEVICE inline static void Swap(T &v0, T &v1) {
#if defined(__HIPCC__)
thrust::swap(v0, v1);
#else
std::swap(v0, v1);
#endif
}
/*!
* \brief Obtain the delta MAP by trying to switch the positions of labels in pos_pred_pos or
* neg_pred_pos when sorted by predictions
   * \param pos_pred_pos positive label's prediction value position when the group's prediction
   *                     values are sorted
   * \param neg_pred_pos negative label's prediction value position when the group's prediction
   *                     values are sorted
* \param pos_label, neg_label the chosen positive and negative labels
* \param p_map_stats a vector containing the accumulated precisions for each position in a list
* \param map_stats_size size of the accumulated precisions vector
*/
XGBOOST_DEVICE inline static bst_float GetLambdaMAP(
int pos_pred_pos, int neg_pred_pos,
bst_float pos_label, bst_float neg_label,
const MAPStats *p_map_stats, uint32_t map_stats_size) {
if (pos_pred_pos == neg_pred_pos || p_map_stats[map_stats_size - 1].hits == 0) {
return 0.0f;
}
if (pos_pred_pos > neg_pred_pos) {
Swap(pos_pred_pos, neg_pred_pos);
Swap(pos_label, neg_label);
}
bst_float original = p_map_stats[neg_pred_pos].ap_acc;
if (pos_pred_pos != 0) original -= p_map_stats[pos_pred_pos - 1].ap_acc;
bst_float changed = 0;
bst_float label1 = pos_label > 0.0f ? 1.0f : 0.0f;
bst_float label2 = neg_label > 0.0f ? 1.0f : 0.0f;
if (label1 == label2) {
return 0.0;
} else if (label1 < label2) {
changed += p_map_stats[neg_pred_pos - 1].ap_acc_add - p_map_stats[pos_pred_pos].ap_acc_add;
changed += (p_map_stats[pos_pred_pos].hits + 1.0f) / (pos_pred_pos + 1);
} else {
changed += p_map_stats[neg_pred_pos - 1].ap_acc_miss - p_map_stats[pos_pred_pos].ap_acc_miss;
changed += p_map_stats[neg_pred_pos].hits / (neg_pred_pos + 1);
}
bst_float ans = (changed - original) / (p_map_stats[map_stats_size - 1].hits);
if (ans < 0) ans = -ans;
return ans;
}
public:
/*
* \brief obtain preprocessing results for calculating delta MAP
* \param sorted_list the list containing entry information
* \param map_stats a vector containing the accumulated precisions for each position in a list
*/
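  // Example: for sorted labels (1, 0, 1) the accumulated stats are
  // {1, 0, 2, 1}, {1, 0, 2, 1} and {1 + 2/3, 1/3, 3, 2}
  // for (ap_acc, ap_acc_miss, ap_acc_add, hits) at positions 0, 1 and 2.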
inline static void GetMAPStats(const std::vector<ListEntry> &sorted_list,
std::vector<MAPStats> *p_map_acc) {
std::vector<MAPStats> &map_acc = *p_map_acc;
map_acc.resize(sorted_list.size());
bst_float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0;
for (size_t i = 1; i <= sorted_list.size(); ++i) {
if (sorted_list[i - 1].label > 0.0f) {
hit++;
acc1 += hit / i;
acc2 += (hit - 1) / i;
acc3 += (hit + 1) / i;
}
map_acc[i - 1] = MAPStats(acc1, acc2, acc3, hit);
}
}
static char const* Name() {
return "rank:map";
}
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {
std::vector<LambdaPair> &pairs = *io_pairs;
std::vector<MAPStats> map_stats;
GetMAPStats(sorted_list, &map_stats);
for (auto & pair : pairs) {
pair.weight *=
GetLambdaMAP(pair.pos_index, pair.neg_index,
sorted_list[pair.pos_index].label, sorted_list[pair.neg_index].label,
&map_stats[0], map_stats.size());
}
}
#if defined(__HIPCC__)
MAPLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter)
: IndexablePredictionSorter(dpreds, segment_label_sorter),
dmap_stats_(segment_label_sorter.GetNumItems(), MAPStats()),
weight_multiplier_(segment_label_sorter, *this) {
this->CreateMAPStats(dlabels, segment_label_sorter);
}
void CreateMAPStats(const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter) {
// For each group, go through the sorted prediction positions, and look up its corresponding
// label from the unsorted labels (from the original label list)
// For each item in the group, compute its MAP stats.
// Interleave the computation of map stats amongst different groups.
    // First, determine positive labels in the dataset individually
auto nitems = segment_label_sorter.GetNumItems();
dh::caching_device_vector<uint32_t> dhits(nitems, 0);
// Original positions of the predictions after they have been sorted
const auto &pred_original_pos = this->GetPredictionSorter().GetOriginalPositionsSpan();
// Unsorted labels
const float *unsorted_labels = dlabels;
auto DeterminePositiveLabelLambda = [=] __device__(uint32_t idx) {
return (unsorted_labels[pred_original_pos[idx]] > 0.0f) ? 1 : 0;
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
dhits.begin(),
DeterminePositiveLabelLambda);
// Allocator to be used by sort for managing space overhead while performing prefix scans
dh::XGBCachingDeviceAllocator<char> alloc;
// Next, prefix scan the positive labels that are segmented to accumulate them.
// This is required for computing the accumulated precisions
const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan();
// Data segmented into different groups...
thrust::inclusive_scan_by_key(thrust::hip::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
dhits.begin(), // Input value
dhits.begin()); // In-place scan
// Compute accumulated precisions for each item, assuming positive and
// negative instances are missing.
// But first, compute individual item precisions
const auto *dhits_arr = dhits.data().get();
// Group info on device
const auto &dgroups = segment_label_sorter.GetGroupsSpan();
auto ComputeItemPrecisionLambda = [=] __device__(uint32_t idx) {
if (unsorted_labels[pred_original_pos[idx]] > 0.0f) {
auto idx_within_group = (idx - dgroups[group_segments[idx]]) + 1;
return MAPStats{static_cast<float>(dhits_arr[idx]) / idx_within_group,
static_cast<float>(dhits_arr[idx] - 1) / idx_within_group,
static_cast<float>(dhits_arr[idx] + 1) / idx_within_group,
1.0f};
}
return MAPStats{};
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
this->dmap_stats_.begin(),
ComputeItemPrecisionLambda);
// Lastly, compute the accumulated precisions for all the items segmented by groups.
// The precisions are accumulated within each group
thrust::inclusive_scan_by_key(thrust::hip::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
this->dmap_stats_.begin(), // Input map stats
this->dmap_stats_.begin()); // In-place scan and output here
}
inline const common::Span<const MAPStats> GetMapStatsSpan() const {
return { dmap_stats_.data().get(), dmap_stats_.size() };
}
// Type containing device pointers that can be cheaply copied on the kernel
class MAPLambdaWeightMultiplier : public BaseLambdaWeightMultiplier {
public:
MAPLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const MAPLambdaWeightComputer &lwc)
: BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()),
dmap_stats_(lwc.GetMapStatsSpan()) {}
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
uint32_t group_begin = dgroups_[gidx];
uint32_t group_end = dgroups_[gidx + 1];
auto pos_lab_orig_posn = dorig_pos_[pidx];
auto neg_lab_orig_posn = dorig_pos_[nidx];
KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn);
// Note: the label positive and negative indices are relative to the entire dataset.
// Hence, scale them back to an index within the group
auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin;
auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin;
return MAPLambdaWeightComputer::GetLambdaMAP(
pos_pred_pos, neg_pred_pos,
dsorted_labels_[pidx], dsorted_labels_[nidx],
&dmap_stats_[group_begin], group_end - group_begin);
}
private:
common::Span<const MAPStats> dmap_stats_; // Start address of the map stats for every sorted
// prediction value
};
inline const MAPLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; }
private:
dh::caching_device_vector<MAPStats> dmap_stats_;
// This computes the adjustment to the weight
const MAPLambdaWeightMultiplier weight_multiplier_;
#endif
};
#if defined(__HIPCC__)
class SortedLabelList : dh::SegmentSorter<float> {
private:
const LambdaRankParam ¶m_; // Objective configuration
public:
explicit SortedLabelList(const LambdaRankParam ¶m)
: param_(param) {}
// Sort the labels that are grouped by 'groups'
void Sort(const HostDeviceVector<bst_float> &dlabels, const std::vector<uint32_t> &groups) {
this->SortItems(dlabels.ConstDevicePointer(), dlabels.Size(), groups);
}
// This kernel can only run *after* the kernel in sort is completed, as they
// use the default stream
template <typename LambdaWeightComputerT>
void ComputeGradients(const bst_float *dpreds, // Unsorted predictions
const bst_float *dlabels, // Unsorted labels
const HostDeviceVector<bst_float> &weights,
int iter,
GradientPair *out_gpair,
float weight_normalization_factor) {
// Group info on device
const auto &dgroups = this->GetGroupsSpan();
uint32_t ngroups = this->GetNumGroups() + 1;
uint32_t total_items = this->GetNumItems();
uint32_t niter = param_.num_pairsample * total_items;
float fix_list_weight = param_.fix_list_weight;
const auto &original_pos = this->GetOriginalPositionsSpan();
uint32_t num_weights = weights.Size();
auto dweights = num_weights ? weights.ConstDevicePointer() : nullptr;
const auto &sorted_labels = this->GetItemsSpan();
// This is used to adjust the weight of different elements based on the different ranking
// objective function policies
LambdaWeightComputerT weight_computer(dpreds, dlabels, *this);
auto wmultiplier = weight_computer.GetWeightMultiplier();
int device_id = -1;
dh::safe_cuda(hipGetDevice(&device_id));
// For each instance in the group, compute the gradient pair concurrently
dh::LaunchN(device_id, niter, nullptr, [=] __device__(uint32_t idx) {
// First, determine the group 'idx' belongs to
uint32_t item_idx = idx % total_items;
uint32_t group_idx =
thrust::upper_bound(thrust::seq, dgroups.begin(),
dgroups.begin() + ngroups, item_idx) -
dgroups.begin();
// Span of this group within the larger labels/predictions sorted tuple
uint32_t group_begin = dgroups[group_idx - 1];
uint32_t group_end = dgroups[group_idx];
uint32_t total_group_items = group_end - group_begin;
// Are the labels diverse enough? If they are all the same, then there is nothing to pick
// from another group - bail sooner
if (sorted_labels[group_begin] == sorted_labels[group_end - 1]) return;
// Find the number of labels less than and greater than the current label
// at the sorted index position item_idx
uint32_t nleft = CountNumItemsToTheLeftOf(
sorted_labels.data() + group_begin, item_idx - group_begin + 1, sorted_labels[item_idx]);
uint32_t nright = CountNumItemsToTheRightOf(
sorted_labels.data() + item_idx, group_end - item_idx, sorted_labels[item_idx]);
// Create a minstd_rand object to act as our source of randomness
thrust::minstd_rand rng((iter + 1) * 1111);
rng.discard(((idx / total_items) * total_group_items) + item_idx - group_begin);
// Create a uniform_int_distribution to produce a sample from outside of the
// present label group
thrust::uniform_int_distribution<int> dist(0, nleft + nright - 1);
int sample = dist(rng);
int pos_idx = -1; // Bigger label
int neg_idx = -1; // Smaller label
// Are we picking a sample to the left/right of the current group?
if (sample < nleft) {
// Go left
pos_idx = sample + group_begin;
neg_idx = item_idx;
} else {
pos_idx = item_idx;
uint32_t items_in_group = total_group_items - nleft - nright;
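// Since sample >= nleft here, sample + items_in_group + group_begin lands in the right
// partition of the group, which begins at group_begin + nleft + items_in_group.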
neg_idx = sample + items_in_group + group_begin;
}
// Compute and assign the gradients now
const float eps = 1e-16f;
bst_float p = common::Sigmoid(dpreds[original_pos[pos_idx]] - dpreds[original_pos[neg_idx]]);
bst_float g = p - 1.0f;
bst_float h = thrust::max(p * (1.0f - p), eps);
// Rescale each gradient and hessian so that the group contributes a constant total weight
float scale = __frcp_ru(niter / total_items);
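// niter / total_items is exactly num_pairsample (niter was built as num_pairsample *
// total_items), so this mirrors the 1/num_pairsample rescaling of the CPU path.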
if (fix_list_weight != 0.0f) {
scale *= fix_list_weight / total_group_items;
}
float weight = num_weights ? dweights[group_idx - 1] : 1.0f;
weight *= weight_normalization_factor;
weight *= wmultiplier.GetWeight(group_idx - 1, pos_idx, neg_idx);
weight *= scale;
// Accumulate gradient and hessian in both positive and negative indices
const GradientPair in_pos_gpair(g * weight, 2.0f * weight * h);
dh::AtomicAddGpair(&out_gpair[original_pos[pos_idx]], in_pos_gpair);
const GradientPair in_neg_gpair(-g * weight, 2.0f * weight * h);
dh::AtomicAddGpair(&out_gpair[original_pos[neg_idx]], in_neg_gpair);
});
// Wait until the computations done by the kernel are complete
dh::safe_cuda(hipStreamSynchronize(nullptr));
}
};
#endif
// objective for lambda rank
template <typename LambdaWeightComputerT>
class LambdaRankObj : public ObjFunction {
public:
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair) override {
CHECK_EQ(preds.Size(), info.labels_.Size()) << "label size predict size not match";
// quick consistency when group is not available
std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_;
CHECK(gptr.size() != 0 && gptr.back() == info.labels_.Size())
<< "group structure not consistent with #rows" << ", "
<< "group ponter size: " << gptr.size() << ", "
<< "labels size: " << info.labels_.Size() << ", "
<< "group pointer back: " << (gptr.size() == 0 ? 0 : gptr.back());
#if defined(__HIPCC__)
// Check if we have a GPU assignment; else, revert back to CPU
auto device = tparam_->gpu_id;
if (device >= 0) {
ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr);
} else {
// Revert back to CPU
#endif
ComputeGradientsOnCPU(preds, info, iter, out_gpair, gptr);
#if defined(__HIPCC__)
}
#endif
}
const char* DefaultEvalMetric() const override {
return "map";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(LambdaWeightComputerT::Name());
out["lambda_rank_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["lambda_rank_param"], ¶m_);
}
private:
bst_float ComputeWeightNormalizationFactor(const MetaInfo& info,
const std::vector<unsigned> &gptr) {
const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
bst_float sum_weights = 0;
for (bst_omp_uint k = 0; k < ngroup; ++k) {
sum_weights += info.GetWeight(k);
}
return ngroup / sum_weights;
}
void ComputeGradientsOnCPU(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair,
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on CPU.";
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);
const auto& preds_h = preds.HostVector();
const auto& labels = info.labels_.HostVector();
std::vector<GradientPair>& gpair = out_gpair->HostVector();
const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
out_gpair->Resize(preds.Size());
#pragma omp parallel
{
// parallel construct, declare random number generator here, so that each
// thread use its own random number generator, seed by thread id and current iteration
std::minstd_rand rnd((iter + 1) * 1111);
std::vector<LambdaPair> pairs;
std::vector<ListEntry> lst;
std::vector< std::pair<bst_float, unsigned> > rec;
#pragma omp for schedule(static)
for (bst_omp_uint k = 0; k < ngroup; ++k) {
lst.clear(); pairs.clear();
for (unsigned j = gptr[k]; j < gptr[k+1]; ++j) {
lst.emplace_back(preds_h[j], labels[j], j);
gpair[j] = GradientPair(0.0f, 0.0f);
}
std::stable_sort(lst.begin(), lst.end(), ListEntry::CmpPred);
rec.resize(lst.size());
for (unsigned i = 0; i < lst.size(); ++i) {
rec[i] = std::make_pair(lst[i].label, i);
}
std::stable_sort(rec.begin(), rec.end(), common::CmpFirst);
// enumerate buckets with the same label; for each item in 'lst', grab another sample randomly
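// (e.g. with sorted labels {2, 2, 1, 0} the buckets are [0,2), [2,3) and [3,4); an item
// in the first bucket can only be paired with one of the 2 items outside that bucket)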
for (unsigned i = 0; i < rec.size(); ) {
unsigned j = i + 1;
while (j < rec.size() && rec[j].first == rec[i].first) ++j;
// bucket in [i,j), get a sample outside bucket
unsigned nleft = i, nright = static_cast<unsigned>(rec.size() - j);
if (nleft + nright != 0) {
int nsample = param_.num_pairsample;
while (nsample --) {
for (unsigned pid = i; pid < j; ++pid) {
unsigned ridx = std::uniform_int_distribution<unsigned>(0, nleft + nright - 1)(rnd);
if (ridx < nleft) {
pairs.emplace_back(rec[ridx].second, rec[pid].second,
info.GetWeight(k) * weight_normalization_factor);
} else {
pairs.emplace_back(rec[pid].second, rec[ridx+j-i].second,
info.GetWeight(k) * weight_normalization_factor);
}
}
}
}
i = j;
}
// get lambda weight for the pairs
LambdaWeightComputerT::GetLambdaWeight(lst, &pairs);
// rescale each gradient and hessian so that each list contributes a constant total weight
float scale = 1.0f / param_.num_pairsample;
if (param_.fix_list_weight != 0.0f) {
scale *= param_.fix_list_weight / (gptr[k + 1] - gptr[k]);
}
for (auto & pair : pairs) {
const ListEntry &pos = lst[pair.pos_index];
const ListEntry &neg = lst[pair.neg_index];
const bst_float w = pair.weight * scale;
const float eps = 1e-16f;
bst_float p = common::Sigmoid(pos.pred - neg.pred);
bst_float g = p - 1.0f;
bst_float h = ::max(p * (1.0f - p), eps);
// accumulate gradient and hessian in both pid, and nid
gpair[pos.rindex] += GradientPair(g * w, 2.0f*w*h);
gpair[neg.rindex] += GradientPair(-g * w, 2.0f*w*h);
}
}
}
}
#if defined(__HIPCC__)
void ComputeGradientsOnGPU(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair,
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU.";
auto device = tparam_->gpu_id;
dh::safe_cuda(hipSetDevice(device));
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);
// Set the device ID and copy them to the device
out_gpair->SetDevice(device);
info.labels_.SetDevice(device);
preds.SetDevice(device);
info.weights_.SetDevice(device);
out_gpair->Resize(preds.Size());
auto d_preds = preds.ConstDevicePointer();
auto d_gpair = out_gpair->DevicePointer();
auto d_labels = info.labels_.ConstDevicePointer();
SortedLabelList slist(param_);
// Sort the labels within the groups on the device
slist.Sort(info.labels_, gptr);
// Initialize the gradients next
out_gpair->Fill(GradientPair(0.0f, 0.0f));
// Finally, compute the gradients
slist.ComputeGradients<LambdaWeightComputerT>
(d_preds, d_labels, info.weights_, iter, d_gpair, weight_normalization_factor);
}
#endif
LambdaRankParam param_;
};
#if !defined(GTEST_TEST)
// register the objective functions
DMLC_REGISTER_PARAMETER(LambdaRankParam);
XGBOOST_REGISTER_OBJECTIVE(PairwiseRankObj, PairwiseLambdaWeightComputer::Name())
.describe("Pairwise rank objective.")
.set_body([]() { return new LambdaRankObj<PairwiseLambdaWeightComputer>(); });
XGBOOST_REGISTER_OBJECTIVE(LambdaRankNDCG, NDCGLambdaWeightComputer::Name())
.describe("LambdaRank with NDCG as objective.")
.set_body([]() { return new LambdaRankObj<NDCGLambdaWeightComputer>(); });
XGBOOST_REGISTER_OBJECTIVE(LambdaRankObjMAP, MAPLambdaWeightComputer::Name())
.describe("LambdaRank with MAP as objective.")
.set_body([]() { return new LambdaRankObj<MAPLambdaWeightComputer>(); });
#endif
} // namespace obj
} // namespace xgboost
| f3cae2c0a525fcf7a1123414f25f81220831820b.cu | /*!
* Copyright 2015-2019 XGBoost contributors
*/
#include <vector>
#include <algorithm>
#include <utility>
#include <dmlc/omp.h>
#include <dmlc/timer.h>
#include "xgboost/logging.h"
#include "xgboost/objective.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "../common/math.h"
#include "../common/random.h"
#if defined(__CUDACC__)
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/random/linear_congruential_engine.h>
#include <cub/util_allocator.cuh>
#include "../common/device_helpers.cuh"
#endif
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA) && !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(rank_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> {
size_t num_pairsample;
float fix_list_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(LambdaRankParam) {
DMLC_DECLARE_FIELD(num_pairsample).set_lower_bound(1).set_default(1)
.describe("Number of pair generated for each instance.");
DMLC_DECLARE_FIELD(fix_list_weight).set_lower_bound(0.0f).set_default(0.0f)
.describe("Normalize the weight of each list by this value,"
" if equals 0, no effect will happen");
}
};
#if defined(__CUDACC__)
// Helper functions
template <typename T>
XGBOOST_DEVICE __forceinline__ uint32_t
CountNumItemsToTheLeftOf(const T *__restrict__ items, uint32_t n, T v) {
return thrust::lower_bound(thrust::seq, items, items + n, v,
thrust::greater<T>()) -
items;
}
template <typename T>
XGBOOST_DEVICE __forceinline__ uint32_t
CountNumItemsToTheRightOf(const T *__restrict__ items, uint32_t n, T v) {
return n - (thrust::upper_bound(thrust::seq, items, items + n, v,
thrust::greater<T>()) -
items);
}
#endif
/*! \brief helper information in a list */
struct ListEntry {
/*! \brief the predict score we in the data */
bst_float pred;
/*! \brief the actual label of the entry */
bst_float label;
/*! \brief row index in the data matrix */
unsigned rindex;
// constructor
ListEntry(bst_float pred, bst_float label, unsigned rindex)
: pred(pred), label(label), rindex(rindex) {}
// comparator by prediction
inline static bool CmpPred(const ListEntry &a, const ListEntry &b) {
return a.pred > b.pred;
}
// comparator by label
inline static bool CmpLabel(const ListEntry &a, const ListEntry &b) {
return a.label > b.label;
}
};
/*! \brief a pair in the lambda rank */
struct LambdaPair {
/*! \brief positive index: this is a position in the list */
unsigned pos_index;
/*! \brief negative index: this is a position in the list */
unsigned neg_index;
/*! \brief weight to be filled in */
bst_float weight;
// constructor
LambdaPair(unsigned pos_index, unsigned neg_index)
: pos_index(pos_index), neg_index(neg_index), weight(1.0f) {}
// constructor
LambdaPair(unsigned pos_index, unsigned neg_index, bst_float weight)
: pos_index(pos_index), neg_index(neg_index), weight(weight) {}
};
class PairwiseLambdaWeightComputer {
public:
/*!
* \brief get lambda weight for existing pairs - for pairwise objective
* \param list a list that is sorted by pred score
* \param io_pairs record of pairs, containing the pairs to fill in weights
*/
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {}
static char const* Name() {
return "rank:pairwise";
}
#if defined(__CUDACC__)
PairwiseLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter) {}
class PairwiseLambdaWeightMultiplier {
public:
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
return 1.0f;
}
};
inline const PairwiseLambdaWeightMultiplier GetWeightMultiplier() const {
return {};
}
#endif
};
#if defined(__CUDACC__)
class BaseLambdaWeightMultiplier {
public:
BaseLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const dh::SegmentSorter<float> &segment_pred_sorter)
: dsorted_labels_(segment_label_sorter.GetItemsSpan()),
dorig_pos_(segment_label_sorter.GetOriginalPositionsSpan()),
dgroups_(segment_label_sorter.GetGroupsSpan()),
dindexable_sorted_preds_pos_(segment_pred_sorter.GetIndexableSortedPositionsSpan()) {}
protected:
const common::Span<const float> dsorted_labels_; // Labels sorted within a group
const common::Span<const uint32_t> dorig_pos_; // Original indices of the labels
// before they are sorted
const common::Span<const uint32_t> dgroups_; // The group indices
// Where can a prediction for a label be found in the original array, when they are sorted
const common::Span<const uint32_t> dindexable_sorted_preds_pos_;
};
// While computing the weight that needs to be adjusted by this ranking objective, we need
// to figure out where the positive and negative labels chosen earlier exist, if the group
// were to be sorted by its predictions. To accommodate this, we employ the following algorithm.
// For a given group, let's assume the following:
// labels: 1 5 9 2 4 8 0 7 6 3
// predictions: 1 9 0 8 2 7 3 6 5 4
// position: 0 1 2 3 4 5 6 7 8 9
//
// After label sort:
// labels: 9 8 7 6 5 4 3 2 1 0
// position: 2 5 7 8 1 4 9 3 0 6
//
// After prediction sort:
// predictions: 9 8 7 6 5 4 3 2 1 0
// position: 1 3 5 7 8 9 6 4 0 2
//
// If a sorted label at position 'x' is chosen, then we need to find out where the prediction
// for this label 'x' exists, if the group were to be sorted by predictions.
// We first take the sorted prediction positions:
// position: 1 3 5 7 8 9 6 4 0 2
// at indices: 0 1 2 3 4 5 6 7 8 9
//
// We create a sorted prediction positional array, such that the value at position 'x' gives
// us the position in the sorted prediction array where its related prediction lies.
// dindexable_sorted_preds_pos_: 8 0 9 1 7 2 6 3 4 5
// at indices: 0 1 2 3 4 5 6 7 8 9
// Basically, swap the previous 2 arrays, sort the indices and reorder positions
// for an O(1) lookup using the position where the sorted label exists.
//
// This type does that using the SegmentSorter
class IndexablePredictionSorter {
public:
IndexablePredictionSorter(const bst_float *dpreds,
const dh::SegmentSorter<float> &segment_label_sorter) {
// Sort the predictions first
segment_pred_sorter_.SortItems(dpreds, segment_label_sorter.GetNumItems(),
segment_label_sorter.GetGroupSegmentsSpan());
// Create an index for the sorted prediction positions
segment_pred_sorter_.CreateIndexableSortedPositions();
}
inline const dh::SegmentSorter<float> &GetPredictionSorter() const {
return segment_pred_sorter_;
}
private:
dh::SegmentSorter<float> segment_pred_sorter_; // For sorting the predictions
};
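// A minimal host-side sketch of the same indexing idea (hypothetical helper, for
// illustration only): the indexable array is simply the inverse permutation of the
// sorted positions produced by the sorter.
inline void ExampleBuildIndexableSortedPositions(const std::vector<uint32_t> &sorted_pos,
std::vector<uint32_t> *p_indexable) {
p_indexable->resize(sorted_pos.size());
for (uint32_t i = 0; i < sorted_pos.size(); ++i) {
// The item that ends up at rank 'i' originally lived at position sorted_pos[i]
(*p_indexable)[sorted_pos[i]] = i;
}
}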
#endif
// beta version: NDCG lambda rank
class NDCGLambdaWeightComputer
#if defined(__CUDACC__)
: public IndexablePredictionSorter
#endif
{
public:
#if defined(__CUDACC__)
// This function object computes the item's DCG value
class ComputeItemDCG : public thrust::unary_function<uint32_t, float> {
public:
XGBOOST_DEVICE ComputeItemDCG(const common::Span<const float> &dsorted_labels,
const common::Span<const uint32_t> &dgroups,
const common::Span<const uint32_t> &gidxs)
: dsorted_labels_(dsorted_labels),
dgroups_(dgroups),
dgidxs_(gidxs) {}
// Compute DCG for the item at 'idx'
__device__ __forceinline__ float operator()(uint32_t idx) const {
return ComputeItemDCGWeight(dsorted_labels_[idx], idx - dgroups_[dgidxs_[idx]]);
}
private:
const common::Span<const float> dsorted_labels_; // Labels sorted within a group
const common::Span<const uint32_t> dgroups_; // The group indices - where each group
// begins and ends
const common::Span<const uint32_t> dgidxs_; // The group each items belongs to
};
// Type containing device pointers that can be cheaply copied on the kernel
class NDCGLambdaWeightMultiplier : public BaseLambdaWeightMultiplier {
public:
NDCGLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const NDCGLambdaWeightComputer &lwc)
: BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()),
dgroup_dcgs_(lwc.GetGroupDcgsSpan()) {}
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
if (dgroup_dcgs_[gidx] == 0.0) return 0.0f;
uint32_t group_begin = dgroups_[gidx];
auto pos_lab_orig_posn = dorig_pos_[pidx];
auto neg_lab_orig_posn = dorig_pos_[nidx];
KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn);
// Note: the label positive and negative indices are relative to the entire dataset.
// Hence, scale them back to an index within the group
auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin;
auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin;
return NDCGLambdaWeightComputer::ComputeDeltaWeight(
pos_pred_pos, neg_pred_pos,
static_cast<int>(dsorted_labels_[pidx]), static_cast<int>(dsorted_labels_[nidx]),
dgroup_dcgs_[gidx]);
}
private:
const common::Span<const float> dgroup_dcgs_; // Group DCG values
};
NDCGLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter)
: IndexablePredictionSorter(dpreds, segment_label_sorter),
dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f),
weight_multiplier_(segment_label_sorter, *this) {
const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan();
// Allocator to be used for managing space overhead while performing transformed reductions
dh::XGBCachingDeviceAllocator<char> alloc;
// Compute each elements DCG values and reduce them across groups concurrently.
auto end_range =
thrust::reduce_by_key(thrust::cuda::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
thrust::make_transform_iterator(
// The indices need not be sequential within a group, as we care only
// about the sum of items DCG values within a group
dh::tcbegin(segment_label_sorter.GetOriginalPositionsSpan()),
ComputeItemDCG(segment_label_sorter.GetItemsSpan(),
segment_label_sorter.GetGroupsSpan(),
group_segments)),
thrust::make_discard_iterator(), // We don't care for the group indices
dgroup_dcg_.begin()); // Sum of the item's DCG values in the group
CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size());
}
inline const common::Span<const float> GetGroupDcgsSpan() const {
return { dgroup_dcg_.data().get(), dgroup_dcg_.size() };
}
inline const NDCGLambdaWeightMultiplier GetWeightMultiplier() const {
return weight_multiplier_;
}
#endif
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {
std::vector<LambdaPair> &pairs = *io_pairs;
float IDCG; // NOLINT
{
std::vector<bst_float> labels(sorted_list.size());
for (size_t i = 0; i < sorted_list.size(); ++i) {
labels[i] = sorted_list[i].label;
}
std::stable_sort(labels.begin(), labels.end(), std::greater<>());
IDCG = ComputeGroupDCGWeight(&labels[0], labels.size());
}
if (IDCG == 0.0) {
for (auto & pair : pairs) {
pair.weight = 0.0f;
}
} else {
for (auto & pair : pairs) {
unsigned pos_idx = pair.pos_index;
unsigned neg_idx = pair.neg_index;
pair.weight *= ComputeDeltaWeight(pos_idx, neg_idx,
sorted_list[pos_idx].label, sorted_list[neg_idx].label,
IDCG);
}
}
}
static char const* Name() {
return "rank:ndcg";
}
inline static bst_float ComputeGroupDCGWeight(const float *sorted_labels, uint32_t size) {
double sumdcg = 0.0;
for (uint32_t i = 0; i < size; ++i) {
sumdcg += ComputeItemDCGWeight(sorted_labels[i], i);
}
return static_cast<bst_float>(sumdcg);
}
private:
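// Per-item DCG contribution uses the standard exponential gain:
// gain(label, idx) = (2^label - 1) / log2(idx + 2), with idx the 0-based position.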
XGBOOST_DEVICE inline static bst_float ComputeItemDCGWeight(unsigned label, uint32_t idx) {
return (label != 0) ? (((1 << label) - 1) / std::log2(static_cast<bst_float>(idx + 2))) : 0;
}
// Compute the weight adjustment for an item within a group:
// pos_pred_pos => Where does the positive label live, had the list been sorted by prediction
// neg_pred_pos => Where does the negative label live, had the list been sorted by prediction
// pos_label => positive label value from sorted label list
// neg_label => negative label value from sorted label list
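// Swapping the two items changes the group DCG by
// |((2^pos_label - 1) - (2^neg_label - 1)) * (1/log2(pos_pred_pos + 2) - 1/log2(neg_pred_pos + 2))|,
// and the value returned below is that change divided by the group's IDCG.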
XGBOOST_DEVICE inline static bst_float ComputeDeltaWeight(uint32_t pos_pred_pos,
uint32_t neg_pred_pos,
int pos_label, int neg_label,
float idcg) {
float pos_loginv = 1.0f / std::log2(pos_pred_pos + 2.0f);
float neg_loginv = 1.0f / std::log2(neg_pred_pos + 2.0f);
bst_float original = ((1 << pos_label) - 1) * pos_loginv + ((1 << neg_label) - 1) * neg_loginv;
float changed = ((1 << neg_label) - 1) * pos_loginv + ((1 << pos_label) - 1) * neg_loginv;
bst_float delta = (original - changed) * (1.0f / idcg);
if (delta < 0.0f) delta = - delta;
return delta;
}
#if defined(__CUDACC__)
dh::caching_device_vector<float> dgroup_dcg_;
// This computes the adjustment to the weight
const NDCGLambdaWeightMultiplier weight_multiplier_;
#endif
};
class MAPLambdaWeightComputer
#if defined(__CUDACC__)
: public IndexablePredictionSorter
#endif
{
public:
struct MAPStats {
/*! \brief the accumulated precision */
float ap_acc{0.0f};
/*!
* \brief the accumulated precision,
* assuming a positive instance is missing
*/
float ap_acc_miss{0.0f};
/*!
* \brief the accumulated precision,
* assuming that one more positive instance is inserted ahead
*/
float ap_acc_add{0.0f};
/*! \brief the accumulated positive instance count */
float hits{0.0f};
XGBOOST_DEVICE MAPStats() {} // NOLINT
XGBOOST_DEVICE MAPStats(float ap_acc, float ap_acc_miss, float ap_acc_add, float hits)
: ap_acc(ap_acc), ap_acc_miss(ap_acc_miss), ap_acc_add(ap_acc_add), hits(hits) {}
// For prefix scan
XGBOOST_DEVICE MAPStats operator +(const MAPStats &v1) const {
return {ap_acc + v1.ap_acc, ap_acc_miss + v1.ap_acc_miss,
ap_acc_add + v1.ap_acc_add, hits + v1.hits};
}
// For test purposes - compare for equality
XGBOOST_DEVICE bool operator ==(const MAPStats &rhs) const {
return ap_acc == rhs.ap_acc && ap_acc_miss == rhs.ap_acc_miss &&
ap_acc_add == rhs.ap_acc_add && hits == rhs.hits;
}
};
private:
template <typename T>
XGBOOST_DEVICE inline static void Swap(T &v0, T &v1) {
#if defined(__CUDACC__)
thrust::swap(v0, v1);
#else
std::swap(v0, v1);
#endif
}
/*!
* \brief Obtain the delta MAP by trying to switch the positions of labels in pos_pred_pos or
* neg_pred_pos when sorted by predictions
* \param pos_pred_pos positive label's prediction value position when the groups prediction
* values are sorted
* \param neg_pred_pos negative label's prediction value position when the groups prediction
* values are sorted
* \param pos_label, neg_label the chosen positive and negative labels
* \param p_map_stats a vector containing the accumulated precisions for each position in a list
* \param map_stats_size size of the accumulated precisions vector
*/
XGBOOST_DEVICE inline static bst_float GetLambdaMAP(
int pos_pred_pos, int neg_pred_pos,
bst_float pos_label, bst_float neg_label,
const MAPStats *p_map_stats, uint32_t map_stats_size) {
if (pos_pred_pos == neg_pred_pos || p_map_stats[map_stats_size - 1].hits == 0) {
return 0.0f;
}
if (pos_pred_pos > neg_pred_pos) {
Swap(pos_pred_pos, neg_pred_pos);
Swap(pos_label, neg_label);
}
bst_float original = p_map_stats[neg_pred_pos].ap_acc;
if (pos_pred_pos != 0) original -= p_map_stats[pos_pred_pos - 1].ap_acc;
bst_float changed = 0;
bst_float label1 = pos_label > 0.0f ? 1.0f : 0.0f;
bst_float label2 = neg_label > 0.0f ? 1.0f : 0.0f;
if (label1 == label2) {
return 0.0;
} else if (label1 < label2) {
changed += p_map_stats[neg_pred_pos - 1].ap_acc_add - p_map_stats[pos_pred_pos].ap_acc_add;
changed += (p_map_stats[pos_pred_pos].hits + 1.0f) / (pos_pred_pos + 1);
} else {
changed += p_map_stats[neg_pred_pos - 1].ap_acc_miss - p_map_stats[pos_pred_pos].ap_acc_miss;
changed += p_map_stats[neg_pred_pos].hits / (neg_pred_pos + 1);
}
bst_float ans = (changed - original) / (p_map_stats[map_stats_size - 1].hits);
if (ans < 0) ans = -ans;
return ans;
}
public:
/*
* \brief obtain preprocessing results for calculating delta MAP
* \param sorted_list the list containing entry information
* \param map_stats a vector containing the accumulated precisions for each position in a list
*/
inline static void GetMAPStats(const std::vector<ListEntry> &sorted_list,
std::vector<MAPStats> *p_map_acc) {
std::vector<MAPStats> &map_acc = *p_map_acc;
map_acc.resize(sorted_list.size());
bst_float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0;
for (size_t i = 1; i <= sorted_list.size(); ++i) {
if (sorted_list[i - 1].label > 0.0f) {
hit++;
acc1 += hit / i;
acc2 += (hit - 1) / i;
acc3 += (hit + 1) / i;
}
map_acc[i - 1] = MAPStats(acc1, acc2, acc3, hit);
}
}
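// Example: labels sorted by prediction {1, 0, 1} yield hits = {1, 1, 2},
// ap_acc = {1, 1, 1 + 2/3}, ap_acc_miss = {0, 0, 1/3} and ap_acc_add = {2, 2, 3}.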
static char const* Name() {
return "rank:map";
}
static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
std::vector<LambdaPair> *io_pairs) {
std::vector<LambdaPair> &pairs = *io_pairs;
std::vector<MAPStats> map_stats;
GetMAPStats(sorted_list, &map_stats);
for (auto & pair : pairs) {
pair.weight *=
GetLambdaMAP(pair.pos_index, pair.neg_index,
sorted_list[pair.pos_index].label, sorted_list[pair.neg_index].label,
&map_stats[0], map_stats.size());
}
}
#if defined(__CUDACC__)
MAPLambdaWeightComputer(const bst_float *dpreds,
const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter)
: IndexablePredictionSorter(dpreds, segment_label_sorter),
dmap_stats_(segment_label_sorter.GetNumItems(), MAPStats()),
weight_multiplier_(segment_label_sorter, *this) {
this->CreateMAPStats(dlabels, segment_label_sorter);
}
void CreateMAPStats(const bst_float *dlabels,
const dh::SegmentSorter<float> &segment_label_sorter) {
// For each group, go through the sorted prediction positions, and look up its corresponding
// label from the unsorted labels (from the original label list)
// For each item in the group, compute its MAP stats.
// Interleave the computation of map stats amongst different groups.
// First, determine positive labels in the dataset individually
auto nitems = segment_label_sorter.GetNumItems();
dh::caching_device_vector<uint32_t> dhits(nitems, 0);
// Original positions of the predictions after they have been sorted
const auto &pred_original_pos = this->GetPredictionSorter().GetOriginalPositionsSpan();
// Unsorted labels
const float *unsorted_labels = dlabels;
auto DeterminePositiveLabelLambda = [=] __device__(uint32_t idx) {
return (unsorted_labels[pred_original_pos[idx]] > 0.0f) ? 1 : 0;
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
dhits.begin(),
DeterminePositiveLabelLambda);
// Allocator to be used by sort for managing space overhead while performing prefix scans
dh::XGBCachingDeviceAllocator<char> alloc;
// Next, prefix scan the positive labels that are segmented to accumulate them.
// This is required for computing the accumulated precisions
const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan();
// Data segmented into different groups...
thrust::inclusive_scan_by_key(thrust::cuda::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
dhits.begin(), // Input value
dhits.begin()); // In-place scan
// Compute accumulated precisions for each item, assuming positive and
// negative instances are missing.
// But first, compute individual item precisions
const auto *dhits_arr = dhits.data().get();
// Group info on device
const auto &dgroups = segment_label_sorter.GetGroupsSpan();
auto ComputeItemPrecisionLambda = [=] __device__(uint32_t idx) {
if (unsorted_labels[pred_original_pos[idx]] > 0.0f) {
auto idx_within_group = (idx - dgroups[group_segments[idx]]) + 1;
return MAPStats{static_cast<float>(dhits_arr[idx]) / idx_within_group,
static_cast<float>(dhits_arr[idx] - 1) / idx_within_group,
static_cast<float>(dhits_arr[idx] + 1) / idx_within_group,
1.0f};
}
return MAPStats{};
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
this->dmap_stats_.begin(),
ComputeItemPrecisionLambda);
// Lastly, compute the accumulated precisions for all the items segmented by groups.
// The precisions are accumulated within each group
thrust::inclusive_scan_by_key(thrust::cuda::par(alloc),
dh::tcbegin(group_segments), dh::tcend(group_segments),
this->dmap_stats_.begin(), // Input map stats
this->dmap_stats_.begin()); // In-place scan and output here
}
inline const common::Span<const MAPStats> GetMapStatsSpan() const {
return { dmap_stats_.data().get(), dmap_stats_.size() };
}
// Type containing device pointers that can be cheaply copied on the kernel
class MAPLambdaWeightMultiplier : public BaseLambdaWeightMultiplier {
public:
MAPLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter,
const MAPLambdaWeightComputer &lwc)
: BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()),
dmap_stats_(lwc.GetMapStatsSpan()) {}
// Adjust the items weight by this value
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const {
uint32_t group_begin = dgroups_[gidx];
uint32_t group_end = dgroups_[gidx + 1];
auto pos_lab_orig_posn = dorig_pos_[pidx];
auto neg_lab_orig_posn = dorig_pos_[nidx];
KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn);
// Note: the label positive and negative indices are relative to the entire dataset.
// Hence, scale them back to an index within the group
auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin;
auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin;
return MAPLambdaWeightComputer::GetLambdaMAP(
pos_pred_pos, neg_pred_pos,
dsorted_labels_[pidx], dsorted_labels_[nidx],
&dmap_stats_[group_begin], group_end - group_begin);
}
private:
common::Span<const MAPStats> dmap_stats_; // Start address of the map stats for every sorted
// prediction value
};
inline const MAPLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; }
private:
dh::caching_device_vector<MAPStats> dmap_stats_;
// This computes the adjustment to the weight
const MAPLambdaWeightMultiplier weight_multiplier_;
#endif
};
#if defined(__CUDACC__)
class SortedLabelList : dh::SegmentSorter<float> {
private:
const LambdaRankParam ¶m_; // Objective configuration
public:
explicit SortedLabelList(const LambdaRankParam ¶m)
: param_(param) {}
// Sort the labels that are grouped by 'groups'
void Sort(const HostDeviceVector<bst_float> &dlabels, const std::vector<uint32_t> &groups) {
this->SortItems(dlabels.ConstDevicePointer(), dlabels.Size(), groups);
}
// This kernel can only run *after* the kernel in sort is completed, as they
// use the default stream
template <typename LambdaWeightComputerT>
void ComputeGradients(const bst_float *dpreds, // Unsorted predictions
const bst_float *dlabels, // Unsorted labels
const HostDeviceVector<bst_float> &weights,
int iter,
GradientPair *out_gpair,
float weight_normalization_factor) {
// Group info on device
const auto &dgroups = this->GetGroupsSpan();
uint32_t ngroups = this->GetNumGroups() + 1;
uint32_t total_items = this->GetNumItems();
uint32_t niter = param_.num_pairsample * total_items;
float fix_list_weight = param_.fix_list_weight;
const auto &original_pos = this->GetOriginalPositionsSpan();
uint32_t num_weights = weights.Size();
auto dweights = num_weights ? weights.ConstDevicePointer() : nullptr;
const auto &sorted_labels = this->GetItemsSpan();
// This is used to adjust the weight of different elements based on the different ranking
// objective function policies
LambdaWeightComputerT weight_computer(dpreds, dlabels, *this);
auto wmultiplier = weight_computer.GetWeightMultiplier();
int device_id = -1;
dh::safe_cuda(cudaGetDevice(&device_id));
// For each instance in the group, compute the gradient pair concurrently
dh::LaunchN(device_id, niter, nullptr, [=] __device__(uint32_t idx) {
// First, determine the group 'idx' belongs to
uint32_t item_idx = idx % total_items;
uint32_t group_idx =
thrust::upper_bound(thrust::seq, dgroups.begin(),
dgroups.begin() + ngroups, item_idx) -
dgroups.begin();
// Span of this group within the larger labels/predictions sorted tuple
uint32_t group_begin = dgroups[group_idx - 1];
uint32_t group_end = dgroups[group_idx];
uint32_t total_group_items = group_end - group_begin;
// Are the labels diverse enough? If they are all the same, then there is nothing to pick
// from another group - bail sooner
if (sorted_labels[group_begin] == sorted_labels[group_end - 1]) return;
// Find the number of labels less than and greater than the current label
// at the sorted index position item_idx
uint32_t nleft = CountNumItemsToTheLeftOf(
sorted_labels.data() + group_begin, item_idx - group_begin + 1, sorted_labels[item_idx]);
uint32_t nright = CountNumItemsToTheRightOf(
sorted_labels.data() + item_idx, group_end - item_idx, sorted_labels[item_idx]);
// Create a minstd_rand object to act as our source of randomness
thrust::minstd_rand rng((iter + 1) * 1111);
rng.discard(((idx / total_items) * total_group_items) + item_idx - group_begin);
// Create a uniform_int_distribution to produce a sample from outside of the
// present label group
thrust::uniform_int_distribution<int> dist(0, nleft + nright - 1);
int sample = dist(rng);
int pos_idx = -1; // Bigger label
int neg_idx = -1; // Smaller label
// Are we picking a sample to the left/right of the current group?
if (sample < nleft) {
// Go left
pos_idx = sample + group_begin;
neg_idx = item_idx;
} else {
pos_idx = item_idx;
uint32_t items_in_group = total_group_items - nleft - nright;
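// Since sample >= nleft here, sample + items_in_group + group_begin lands in the right
// partition of the group, which begins at group_begin + nleft + items_in_group.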
neg_idx = sample + items_in_group + group_begin;
}
// Compute and assign the gradients now
const float eps = 1e-16f;
bst_float p = common::Sigmoid(dpreds[original_pos[pos_idx]] - dpreds[original_pos[neg_idx]]);
bst_float g = p - 1.0f;
bst_float h = thrust::max(p * (1.0f - p), eps);
// Rescale each gradient and hessian so that the group contributes a constant total weight
float scale = __frcp_ru(niter / total_items);
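// niter / total_items is exactly num_pairsample (niter was built as num_pairsample *
// total_items), so this mirrors the 1/num_pairsample rescaling of the CPU path.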
if (fix_list_weight != 0.0f) {
scale *= fix_list_weight / total_group_items;
}
float weight = num_weights ? dweights[group_idx - 1] : 1.0f;
weight *= weight_normalization_factor;
weight *= wmultiplier.GetWeight(group_idx - 1, pos_idx, neg_idx);
weight *= scale;
// Accumulate gradient and hessian in both positive and negative indices
const GradientPair in_pos_gpair(g * weight, 2.0f * weight * h);
dh::AtomicAddGpair(&out_gpair[original_pos[pos_idx]], in_pos_gpair);
const GradientPair in_neg_gpair(-g * weight, 2.0f * weight * h);
dh::AtomicAddGpair(&out_gpair[original_pos[neg_idx]], in_neg_gpair);
});
// Wait until the computations done by the kernel are complete
dh::safe_cuda(cudaStreamSynchronize(nullptr));
}
};
#endif
// objective for lambda rank
template <typename LambdaWeightComputerT>
class LambdaRankObj : public ObjFunction {
public:
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair) override {
CHECK_EQ(preds.Size(), info.labels_.Size()) << "label size predict size not match";
// quick consistency when group is not available
std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_;
CHECK(gptr.size() != 0 && gptr.back() == info.labels_.Size())
<< "group structure not consistent with #rows" << ", "
<< "group ponter size: " << gptr.size() << ", "
<< "labels size: " << info.labels_.Size() << ", "
<< "group pointer back: " << (gptr.size() == 0 ? 0 : gptr.back());
#if defined(__CUDACC__)
// Check if we have a GPU assignment; else, revert back to CPU
auto device = tparam_->gpu_id;
if (device >= 0) {
ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr);
} else {
// Revert back to CPU
#endif
ComputeGradientsOnCPU(preds, info, iter, out_gpair, gptr);
#if defined(__CUDACC__)
}
#endif
}
const char* DefaultEvalMetric() const override {
return "map";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(LambdaWeightComputerT::Name());
out["lambda_rank_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["lambda_rank_param"], ¶m_);
}
private:
bst_float ComputeWeightNormalizationFactor(const MetaInfo& info,
const std::vector<unsigned> &gptr) {
const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
bst_float sum_weights = 0;
for (bst_omp_uint k = 0; k < ngroup; ++k) {
sum_weights += info.GetWeight(k);
}
return ngroup / sum_weights;
}
void ComputeGradientsOnCPU(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair,
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on CPU.";
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);
const auto& preds_h = preds.HostVector();
const auto& labels = info.labels_.HostVector();
std::vector<GradientPair>& gpair = out_gpair->HostVector();
const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1);
out_gpair->Resize(preds.Size());
#pragma omp parallel
{
// parallel construct, declare random number generator here, so that each
// thread use its own random number generator, seed by thread id and current iteration
std::minstd_rand rnd((iter + 1) * 1111);
std::vector<LambdaPair> pairs;
std::vector<ListEntry> lst;
std::vector< std::pair<bst_float, unsigned> > rec;
#pragma omp for schedule(static)
for (bst_omp_uint k = 0; k < ngroup; ++k) {
lst.clear(); pairs.clear();
for (unsigned j = gptr[k]; j < gptr[k+1]; ++j) {
lst.emplace_back(preds_h[j], labels[j], j);
gpair[j] = GradientPair(0.0f, 0.0f);
}
std::stable_sort(lst.begin(), lst.end(), ListEntry::CmpPred);
rec.resize(lst.size());
for (unsigned i = 0; i < lst.size(); ++i) {
rec[i] = std::make_pair(lst[i].label, i);
}
std::stable_sort(rec.begin(), rec.end(), common::CmpFirst);
// enumerate buckets with the same label; for each item in 'lst', grab another sample randomly
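// (e.g. with sorted labels {2, 2, 1, 0} the buckets are [0,2), [2,3) and [3,4); an item
// in the first bucket can only be paired with one of the 2 items outside that bucket)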
for (unsigned i = 0; i < rec.size(); ) {
unsigned j = i + 1;
while (j < rec.size() && rec[j].first == rec[i].first) ++j;
// bucket in [i,j), get a sample outside bucket
unsigned nleft = i, nright = static_cast<unsigned>(rec.size() - j);
if (nleft + nright != 0) {
int nsample = param_.num_pairsample;
while (nsample --) {
for (unsigned pid = i; pid < j; ++pid) {
unsigned ridx = std::uniform_int_distribution<unsigned>(0, nleft + nright - 1)(rnd);
if (ridx < nleft) {
pairs.emplace_back(rec[ridx].second, rec[pid].second,
info.GetWeight(k) * weight_normalization_factor);
} else {
pairs.emplace_back(rec[pid].second, rec[ridx+j-i].second,
info.GetWeight(k) * weight_normalization_factor);
}
}
}
}
i = j;
}
// get lambda weight for the pairs
LambdaWeightComputerT::GetLambdaWeight(lst, &pairs);
// rescale each gradient and hessian so that each list contributes a constant total weight
float scale = 1.0f / param_.num_pairsample;
if (param_.fix_list_weight != 0.0f) {
scale *= param_.fix_list_weight / (gptr[k + 1] - gptr[k]);
}
for (auto & pair : pairs) {
const ListEntry &pos = lst[pair.pos_index];
const ListEntry &neg = lst[pair.neg_index];
const bst_float w = pair.weight * scale;
const float eps = 1e-16f;
bst_float p = common::Sigmoid(pos.pred - neg.pred);
bst_float g = p - 1.0f;
bst_float h = std::max(p * (1.0f - p), eps);
// accumulate gradient and hessian in both pid, and nid
gpair[pos.rindex] += GradientPair(g * w, 2.0f*w*h);
gpair[neg.rindex] += GradientPair(-g * w, 2.0f*w*h);
}
}
}
}
#if defined(__CUDACC__)
void ComputeGradientsOnGPU(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair,
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU.";
auto device = tparam_->gpu_id;
dh::safe_cuda(cudaSetDevice(device));
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);
// Set the device ID and copy them to the device
out_gpair->SetDevice(device);
info.labels_.SetDevice(device);
preds.SetDevice(device);
info.weights_.SetDevice(device);
out_gpair->Resize(preds.Size());
auto d_preds = preds.ConstDevicePointer();
auto d_gpair = out_gpair->DevicePointer();
auto d_labels = info.labels_.ConstDevicePointer();
SortedLabelList slist(param_);
// Sort the labels within the groups on the device
slist.Sort(info.labels_, gptr);
// Initialize the gradients next
out_gpair->Fill(GradientPair(0.0f, 0.0f));
// Finally, compute the gradients
slist.ComputeGradients<LambdaWeightComputerT>
(d_preds, d_labels, info.weights_, iter, d_gpair, weight_normalization_factor);
}
#endif
LambdaRankParam param_;
};
#if !defined(GTEST_TEST)
// register the objective functions
DMLC_REGISTER_PARAMETER(LambdaRankParam);
XGBOOST_REGISTER_OBJECTIVE(PairwiseRankObj, PairwiseLambdaWeightComputer::Name())
.describe("Pairwise rank objective.")
.set_body([]() { return new LambdaRankObj<PairwiseLambdaWeightComputer>(); });
XGBOOST_REGISTER_OBJECTIVE(LambdaRankNDCG, NDCGLambdaWeightComputer::Name())
.describe("LambdaRank with NDCG as objective.")
.set_body([]() { return new LambdaRankObj<NDCGLambdaWeightComputer>(); });
XGBOOST_REGISTER_OBJECTIVE(LambdaRankObjMAP, MAPLambdaWeightComputer::Name())
.describe("LambdaRank with MAP as objective.")
.set_body([]() { return new LambdaRankObj<MAPLambdaWeightComputer>(); });
#endif
} // namespace obj
} // namespace xgboost
|
d0c0183b2bd7e08cb899ecb8d52fbae3ea69e2af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <iostream>
//Kernel definition
template<typename T>
__global__
void oftKernel (T* out,
T* in,
const unsigned int sd_size,
const unsigned int block_size,
const unsigned int I,
const unsigned int L)
{
const unsigned int sd_id = static_cast<int> (threadIdx.x / L); // automatically rounded down by integer division
const unsigned int id = threadIdx.x - sd_id * L;
const unsigned int sd_start = blockIdx.x * blockDim.x * I + sd_id * L * I;
for (unsigned int i = 0; i < I; i++)
{
const unsigned el_id = sd_start + i * L + id;
((T*) out)[el_id] = ((T*) in)[el_id];
// out[el_id] = in[el_id];
// ((T*) out)[0] = ((T*) in)[0];
}
}
int main () {
using namespace std::chrono;
std::cout << "np.array("; //output the results so that they can be read easily by python
std::cout << "(";
for (int Tindx = 1; Tindx <= 5; Tindx++)
{
std::cout << "(";
for(int j = 0; j <= 10; j++)
{
unsigned int I = 1 << j;
unsigned int size = 1 << 30;
unsigned int L = 32;
unsigned int N = 16;
unsigned int sd_size;
switch(Tindx)
{
case 1 :
sd_size = I * L * sizeof(char);
break;
case 2 :
sd_size = I * L * sizeof(short);
break;
case 3 :
sd_size = I * L * sizeof(int);
break;
case 4 :
sd_size = I * L * sizeof(int2);
break;
case 5 :
sd_size = I * L * sizeof(int4);
break;
}
unsigned int block_size = sd_size * N;
unsigned int block_amount = size / block_size;
void* out;
void* in;
auto err1 = hipMalloc(&out, block_size * block_amount);
auto err2 = hipMalloc(&in, block_size * block_amount);
// size_t free;
// size_t total;
// auto err3 = hipMemGetInfo(&free, &total);
if (err2 != hipSuccess)
{
std::cout << "ERROR: " << hipGetErrorString(err2) << std::endl;
}
// for (int x = 1; x <= 10; x++) {
// hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, out, in, sd_size, block_size, I, L);
// hipDeviceSynchronize();
// }
// std::cout<<"free:" <<free << " total:" << total << " savedArrays: " << (total - free)/ (block_size * block_amount) << " j:" << j << " Tindx:" << Tindx << std::endl;
// hipFree(out);
// hipFree(in);
//make a warmup
switch(Tindx)
{
case 1 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<char*> (out), static_cast<char*> (in), sd_size, block_size, I, L);
break;
case 2 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<short*> (out), static_cast<short*> (in), sd_size, block_size, I, L);
break;
case 3 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<int*> (out), static_cast<int*> (in), sd_size, block_size, I, L);
break;
case 4 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<int2*> (out), static_cast<int2*> (in), sd_size, block_size, I, L);
break;
case 5 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<int4*> (out), static_cast<int4*> (in), sd_size, block_size, I, L);
break;
}
hipDeviceSynchronize();
//Time Measurement Point 1
high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
for(int x = 1; x <= 100; x++)//run 100 times for better measurement accuracy
{
switch(Tindx)
{
case 1 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<char*> (out), static_cast<char*> (in), sd_size, block_size, I, L);
break;
case 2 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<short*> (out), static_cast<short*> (in), sd_size, block_size, I, L);
break;
case 3 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<int*> (out), static_cast<int*> (in), sd_size, block_size, I, L);
break;
case 4 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<int2*> (out), static_cast<int2*> (in), sd_size, block_size, I, L);
break;
case 5 :
hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<int4*> (out), static_cast<int4*> (in), sd_size, block_size, I, L);
break;
}
hipDeviceSynchronize();
auto lstErr = hipGetLastError();
if ( hipSuccess != lstErr )
{
std::cout << lstErr << ": " << hipGetErrorString(lstErr) << std::endl;
}
}
// hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, out, in, sd_size, block_size, I, L);
// std::cout<< "size of out:" << sizeof(out) << "tindx:" << Tindx << " block_amount:" << block_amount << " L:" << L << " N:" << N << " block_size: " << block_size << std::endl;
// hipDeviceSynchronize();
//hipLaunchKernelGGL(( oftKernel), dim3(block_amount), dim3(L * N) , 0, 0, static_cast<int4*> (out), static_cast<int4*> (in), sd_size, block_size, I, L);
// hipDeviceSynchronize();
//Time Measurement Point 2
high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
//Output Time Measurement Result
duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
std::cout << time_span.count();
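// time_span.count() is the wall time in seconds for the 100 launches above; an effective
// bandwidth estimate is (2.0 * block_size * block_amount * 100) / time_span.count(),
// counting one read and one write per element.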
hipFree(out);
hipFree(in);
if( j != 10) {std::cout << ",";} // output a ',' unless this is the last element of the loop
}
std::cout << ")";
if( Tindx != 5) {std::cout << ",";} // output a ',' unless this is the last element of the loop
}
std::cout << ")";
std::cout << ")" << std::endl;
return 0;
}
| d0c0183b2bd7e08cb899ecb8d52fbae3ea69e2af.cu | #include <chrono>
#include <iostream>
//Kernel definition
template<typename T>
__global__
void oftKernel (T* out,
T* in,
const unsigned int sd_size,
const unsigned int block_size,
const unsigned int I,
const unsigned int L)
{
const unsigned int sd_id = static_cast<int> (threadIdx.x / L); // automatically rounded down by integer division
const unsigned int id = threadIdx.x - sd_id * L;
const unsigned int sd_start = blockIdx.x * blockDim.x * I + sd_id * L * I;
for (unsigned int i = 0; i < I; i++)
{
const unsigned el_id = sd_start + i * L + id;
((T*) out)[el_id] = ((T*) in)[el_id];
// out[el_id] = in[el_id];
// ((T*) out)[0] = ((T*) in)[0];
}
}
int main () {
using namespace std::chrono;
std::cout << "np.array("; //output the results so that they can be read easily by python
std::cout << "(";
for (int Tindx = 1; Tindx <= 5; Tindx++)
{
std::cout << "(";
for(int j = 0; j <= 10; j++)
{
unsigned int I = 1 << j;
unsigned int size = 1 << 30;
unsigned int L = 32;
unsigned int N = 16;
unsigned int sd_size;
switch(Tindx)
{
case 1 :
sd_size = I * L * sizeof(char);
break;
case 2 :
sd_size = I * L * sizeof(short);
break;
case 3 :
sd_size = I * L * sizeof(int);
break;
case 4 :
sd_size = I * L * sizeof(int2);
break;
case 5 :
sd_size = I * L * sizeof(int4);
break;
}
unsigned int block_size = sd_size * N;
unsigned int block_amount = size / block_size;
void* out;
void* in;
auto err1 = cudaMalloc(&out, block_size * block_amount);
auto err2 = cudaMalloc(&in, block_size * block_amount);
// size_t free;
// size_t total;
// auto err3 = cudaMemGetInfo(&free, &total);
if (err2 != cudaSuccess)
{
std::cout << "ERROR: " << cudaGetErrorString(err2) << std::endl;
}
// for (int x = 1; x <= 10; x++) {
// oftKernel<<<block_amount, L * N >>> (out, in, sd_size, block_size, I, L);
// cudaDeviceSynchronize();
// }
// std::cout<<"free:" <<free << " total:" << total << " savedArrays: " << (total - free)/ (block_size * block_amount) << " j:" << j << " Tindx:" << Tindx << std::endl;
// cudaFree(out);
// cudaFree(in);
//make a warmup
switch(Tindx)
{
case 1 :
oftKernel<<<block_amount, L * N >>> (static_cast<char*> (out), static_cast<char*> (in), sd_size, block_size, I, L);
break;
case 2 :
oftKernel<<<block_amount, L * N >>> (static_cast<short*> (out), static_cast<short*> (in), sd_size, block_size, I, L);
break;
case 3 :
oftKernel<<<block_amount, L * N >>> (static_cast<int*> (out), static_cast<int*> (in), sd_size, block_size, I, L);
break;
case 4 :
oftKernel<<<block_amount, L * N >>> (static_cast<int2*> (out), static_cast<int2*> (in), sd_size, block_size, I, L);
break;
case 5 :
oftKernel<<<block_amount, L * N >>> (static_cast<int4*> (out), static_cast<int4*> (in), sd_size, block_size, I, L);
break;
}
cudaDeviceSynchronize();
//Time Measurement Point 1
high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
for(int x = 1; x <= 100; x++)//run 100 times for better measurement accuracy
{
switch(Tindx)
{
case 1 :
oftKernel<<<block_amount, L * N >>> (static_cast<char*> (out), static_cast<char*> (in), sd_size, block_size, I, L);
break;
case 2 :
oftKernel<<<block_amount, L * N >>> (static_cast<short*> (out), static_cast<short*> (in), sd_size, block_size, I, L);
break;
case 3 :
oftKernel<<<block_amount, L * N >>> (static_cast<int*> (out), static_cast<int*> (in), sd_size, block_size, I, L);
break;
case 4 :
oftKernel<<<block_amount, L * N >>> (static_cast<int2*> (out), static_cast<int2*> (in), sd_size, block_size, I, L);
break;
case 5 :
oftKernel<<<block_amount, L * N >>> (static_cast<int4*> (out), static_cast<int4*> (in), sd_size, block_size, I, L);
break;
}
cudaDeviceSynchronize();
auto lstErr = cudaGetLastError();
if ( cudaSuccess != lstErr )
{
std::cout << lstErr << ": " << cudaGetErrorString(lstErr) << std::endl;
}
}
// oftKernel<<<block_amount, L * N >>> (out, in, sd_size, block_size, I, L);
// std::cout<< "size of out:" << sizeof(out) << "tindx:" << Tindx << " block_amount:" << block_amount << " L:" << L << " N:" << N << " block_size: " << block_size << std::endl;
// cudaDeviceSynchronize();
// oftKernel<<<block_amount, L * N >>> (static_cast<int4*> (out), static_cast<int4*> (in), sd_size, block_size, I, L);
// cudaDeviceSynchronize();
//Time Measurement Point 2
high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
//Output Time Measurement Result
duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
std::cout << time_span.count();
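// time_span.count() is the wall time in seconds for the 100 launches above; an effective
// bandwidth estimate is (2.0 * block_size * block_amount * 100) / time_span.count(),
// counting one read and one write per element.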
cudaFree(out);
cudaFree(in);
if( j != 10) {std::cout << ",";} // output a ',' unless this is the last element of the loop
}
std::cout << ")";
if( Tindx != 5) {std::cout << ",";} // output a ',' unless this is the last element of the loop
}
std::cout << ")";
std::cout << ")" << std::endl;
return 0;
}
|
d926d50c3201f349dd738c72faa13e390a131c5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_incremental_map_track.h"
#include "gpu_track_kernels.h"
#include "gpu_defines.h"
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <memory.h>
#include <malloc.h>
namespace SySal
{
namespace GPU
{
PrismMapTracker::Tracker::Tracker() :
CTOR_INIT(pTracks),
CTOR_INIT(pHostTracks),
CTOR_INIT(pChainMapHeader),
CTOR_INIT(pInternalInfo),
CTOR_INIT(ppChainMapViewEntryPoints),
CTOR_INIT(pHashTable),
CTOR_INIT(pKernelContinue),
CTOR_INIT(pTrackGrains),
CTOR_INIT(pScheduler),
CTOR_INIT(pSchedulerCompactor),
CTOR_INIT(pPairComputer),
CTOR_INIT(pPairIndices),
CTOR_INIT(pSegFindTrackStatus),
CTOR_INIT(pBinFill),
CTOR_INIT(ppBins),
CTOR_INIT(pTBinFill),
CTOR_INIT(pCountTempTracks),
CTOR_INIT(pTBins),
CTOR_INIT(ppTempTracks),
_MergeTracksKernel_LoopLimiter_(0x7fffffff)
{
C.XYTolerance = 0.23 * GetXYScale();
C.ZTolerance = 1.0 * GetZScale();
C.ZThickness = 50 * GetZScale();
C.HashBinCapacity = 6;
C.XYHashTableBinSize = 20 * GetXYScale();
C.ZHashTableBinSize = 2 * GetZScale();
C.ZHashTableBins = 25;
C.MinLength = 20 * GetXYScale();
C.MaxLength = 60 * GetXYScale();
C.MaxTracks = 1000;
C.MinVolume = 30;
C.MinChainVolume = 10;
C.SlopeCenterX = 0;
C.SlopeCenterY = 0;
C.SlopeAcceptanceX = 0;
C.SlopeAcceptanceY = 0;
C.FilterVolumeLength0 = 40;
C.FilterVolumeLength100 = 140;
C.FilterChain0 = 4;
C.FilterChainMult = 4.0f;
C.MergeTrackCell = 150 * GetXYScale();
C.MergeTrackXYTolerance = 2 * GetXYScale();
C.MergeTrackZTolerance = 3 * GetZScale();
C.ClusterVol0 = 220;
C.ClusterVolM = 10;
}
PrismMapTracker::Tracker::~Tracker()
{
DEALLOC(ppTempTracks);
DEALLOC(pTBins);
DEALLOC(pCountTempTracks);
DEALLOC(pTBinFill);
DEALLOC(ppBins);
DEALLOC(pBinFill);
DEALLOC(pPairIndices);
DEALLOC(pPairComputer);
DEALLOC(pSchedulerCompactor);
DEALLOC(pScheduler);
DEALLOC(pSegFindTrackStatus);
DEALLOC(pTrackGrains);
DEALLOC(pKernelContinue);
DEALLOC(pHashTable);
DEALLOC(ppChainMapViewEntryPoints);
DEALLOC(pInternalInfo);
DEALLOC(pChainMapHeader);
DEALLOC(pTracks);
HOST_DEALLOC(pHostTracks);
}
void PrismMapTracker::Tracker::Reset(SySal::Tracker::Configuration &c)
{
hipError_t err;
C = c;
THROW_ON_CUDA_ERR(hipSetDevice(pThis->m_DeviceId));
HOST_WISE_ALLOC(pHostTracks, sizeof(TrackMapHeader));
pHostTracks->Count = 0;
pHostTracks->TotalGrains = 0;
memset(pHostTracks->Reserved, 0, sizeof(short) * 8);
WISE_ALLOC(pTracks, sizeof(TrackMapHeader) + C.MaxTracks * sizeof(IntTrack));
THROW_ON_CUDA_ERR(hipMemcpy(pTracks, pHostTracks, sizeof(TrackMapHeader), hipMemcpyHostToDevice));
EXACT_ALLOC(pKernelContinue, sizeof(int));
EXACT_ALLOC(pInternalInfo, sizeof(InternalInfo));
EXACT_ALLOC(pCountTempTracks, sizeof(int));
EXACT_ALLOC(ppChainMapViewEntryPoints, sizeof(ChainView *) * 2);
THROW_ON_CUDA_ERR(hipMemcpy(&pInternalInfo->C, &C, sizeof(C), hipMemcpyHostToDevice));
}
void PrismMapTracker::Tracker::InternalFindTracks(int minviewtag, int width, int height, ChainView *pLastView, ChainView *pThisView)
{
hipError_t err;
THROW_ON_CUDA_ERR(hipSetDevice(pThis->m_DeviceId));
HashTableBounds hostHashTableBounds;
WISE_ALLOC(pTracks, sizeof(TrackMapHeader) + C.MaxTracks * sizeof(IntTrack));
/* We explore the ChainMap here and define the bounds. */
{
dim3 ithreads(1,1,1);
dim3 iblocks(1,1,1);
hipLaunchKernelGGL(( explore_skewchainmap_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pLastView, pThisView, width, height, pInternalInfo, ppChainMapViewEntryPoints, pTracks);
}
THROW_ON_CUDA_ERR(hipMemcpy(&hostHashTableBounds, &pInternalInfo->H, sizeof(HashTableBounds), hipMemcpyDeviceToHost));
printf("\nDEBUG DUMP 1 %d", hostHashTableBounds.DEBUG1);
if (C.MaxTracks < hostHashTableBounds.NTBins * hostHashTableBounds.TBinCapacity)
{
C.MaxTracks = hostHashTableBounds.NTBins * hostHashTableBounds.TBinCapacity;
printf("\nMaxTracks corrected to %d", C.MaxTracks);
}
printf("\nHashTable Grid: %d %d %d (%d)\n%d %d %d", hostHashTableBounds.XBins, hostHashTableBounds.YBins, hostHashTableBounds.ZBins, hostHashTableBounds.NBins, hostHashTableBounds.XTBins, hostHashTableBounds.YTBins, hostHashTableBounds.TBinCapacity);
// WISE_ALLOC(pTrackGrains, sizeof(IntChain) * C.MaxTracks * _MAX_GRAINS_PER_TRACK_);
WISE_ALLOC(pBinFill, (sizeof(int) * hostHashTableBounds.NBins) * 2);
ppBins = 0;
WISE_ALLOC(pTBinFill, (sizeof(int) * hostHashTableBounds.NTBins));
hipMemset(pCountTempTracks, 0, sizeof(int));
WISE_ALLOC(pTBins, (hostHashTableBounds.NTBins * hostHashTableBounds.TBinCapacity * sizeof(TempIntTrack)));
WISE_ALLOC(ppTempTracks, (sizeof(TempIntTrack *) * C.MaxTracks));
printf("\nDEBUG %d %d %d %d", hostHashTableBounds.NBins * sizeof(int), hostHashTableBounds.NTBins * sizeof(int), C.HashBinCapacity * hostHashTableBounds.NBins * sizeof(IntChain *), hostHashTableBounds.TBinCapacity * sizeof(TempIntTrack) * hostHashTableBounds.NTBins);
hipMemset(pTBinFill, 0, sizeof(int) * (1 + hostHashTableBounds.NTBins));
_CUDA_THROW_ERR_
float vs = (((C.FilterVolumeLength100 - C.FilterVolumeLength0) * 0.01) / (1 << XY_SCALE_SHIFT));
float cm = C.FilterChainMult / (1 << SLOPE_SCALE_SHIFT);
printf("\nDEBUG VS: %f", vs);
printf("\nDEBUG V0: %d", C.FilterVolumeLength0);
long long seekgraintrials = 0;
int lasttemptracks = 0;
/* Fill bins. */
int isx, isy;
int isstep = ((int)(((C.XYHashTableBinSize >> XY_SCALE_SHIFT) << SLOPE_SCALE_SHIFT) / ((float)C.ZThickness / (float)GetZScale() )));
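	/* Scan the slope acceptance window in steps of isstep along X and Y:
	   each (isx, isy) pass re-fills the hash bins for that skew and runs the
	   pair/track search below. */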
for (isx = C.SlopeCenterX - (C.SlopeAcceptanceX/isstep)*isstep; isx <= C.SlopeCenterX + (C.SlopeAcceptanceX/isstep)*isstep; isx += isstep)
for (isy = C.SlopeCenterY - (C.SlopeAcceptanceY/isstep)*isstep; isy <= C.SlopeCenterY + (C.SlopeAcceptanceY/isstep)*isstep; isy += isstep)
{
hipMemset(pBinFill, 0, sizeof(int) * hostHashTableBounds.NBins * 2);
//hipMemset(pBinFill + hostHashTableBounds.NBins * 2, 0x7E7E7E7E, sizeof(int) * hostHashTableBounds.NBins);
_CUDA_THROW_ERR_
{
dim3 iblocks(pThis->m_Prop.multiProcessorCount /*ChHdr.Views*/, 1/*ChHdr.Views*/, 1);
dim3 ithreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
//fill_skewhashtable_list_kernel<<<iblocks, ithreads>>>(pBinFill, ppChainMapViewEntryPoints, &pInternalInfo->H, isx, isy, C.MinChainVolume);
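			// Bin the chains of the two chain-map views under the current skew; the last argument selects the view.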
hipLaunchKernelGGL(( fill_skewhashtable1view_list_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pBinFill, ppChainMapViewEntryPoints, &pInternalInfo->H, isx, isy, C.MinChainVolume, 0);
hipLaunchKernelGGL(( fill_skewhashtable1view_list_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pBinFill, ppChainMapViewEntryPoints, &pInternalInfo->H, isx, isy, C.MinChainVolume, 1);
_CUDA_THROW_ERR_
}
#if 1
{
int totalpaircomputersize = 1;
int paircomputersize = 1;
int depth = 1;
while (paircomputersize < hostHashTableBounds.NBins)
{
paircomputersize <<= 1;
totalpaircomputersize += paircomputersize;
depth++;
}
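			/* pPairComputer is a binary reduction tree padded to a power of two: level 0
			   holds one candidate-pair count per hash bin, each level above halves in size,
			   and after the recursive_sum_kernel passes the top element is the total pair count. */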
WISE_ALLOC(pPairComputer, sizeof(int) * totalpaircomputersize);
dim3 pcthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 pcblocks(totalpaircomputersize / pcthreads.x + 1, 1, 1);
//compute_pairs_kernel<<<pcblocks, pcthreads>>>(pBinFill + hostHashTableBounds.NBins, pBinFill + 2 * hostHashTableBounds.NBins, C.FilterChain0, pPairComputer, hostHashTableBounds.NBins);
//compute_pairs_kernel<<<pcblocks, pcthreads>>>(pBinFill + hostHashTableBounds.NBins, pPairComputer, hostHashTableBounds.NBins);
//compute_pairs_kernel<<<pcblocks, pcthreads>>>(pBinFill + hostHashTableBounds.NBins, pBinFill + 2 * hostHashTableBounds.NBins, minviewtag, pPairComputer, hostHashTableBounds.NBins);
hipLaunchKernelGGL(( compute_pairs1v_kernel), dim3(pcblocks), dim3(pcthreads), 0, 0, pBinFill + hostHashTableBounds.NBins, pBinFill + 2 * hostHashTableBounds.NBins, minviewtag, pPairComputer, hostHashTableBounds.NBins);
/*
int *pwDEBUG = new int[hostHashTableBounds.NBins];
int *pzDEBUG = new int[hostHashTableBounds.NBins];
hipMemcpy(pwDEBUG, pBinFill + hostHashTableBounds.NBins, sizeof(int) * hostHashTableBounds.NBins, hipMemcpyDeviceToHost);
hipMemcpy(pzDEBUG, pPairComputer, sizeof(int) * hostHashTableBounds.NBins, hipMemcpyDeviceToHost);
for (int iii = 0; iii < 100; iii++)
if (pwDEBUG[iii])
{
printf("\n%d %08X %d", iii, pwDEBUG[iii], pzDEBUG[iii]);
}
delete [] pzDEBUG;
delete [] pwDEBUG;
*/
int d;
int *pin = pPairComputer;
int *pout;
for (d = 0; d < depth; d++)
{
pout = pin + paircomputersize;
dim3 cthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 cblocks((paircomputersize >> 1) / cthreads.x + 1, 1, 1);
hipLaunchKernelGGL(( recursive_sum_kernel), dim3(cblocks), dim3(cthreads), 0, 0, pin, pout, paircomputersize);
paircomputersize >>= 1;
pin = pout;
}
int totalpairs = 0;
hipMemcpy(&totalpairs, pin, sizeof(int), hipMemcpyDeviceToHost);
//printf("\nDEBUG-TOTALPAIRS %d", totalpairs);
WISE_ALLOC(pPairIndices, sizeof(int) * 3 * totalpairs);
hipMemset(pPairIndices, 0, sizeof(int) * 3 * totalpairs);
dim3 pthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 pblocks((totalpairs / pthreads.x) + 1, 1, 1);
//pair_find_kernel<<<pblocks, pthreads>>>(pin, depth, pBinFill + hostHashTableBounds.NBins, pBinFill, ppChainMapViewEntryPoints, pPairIndices);
hipLaunchKernelGGL(( pair_find1v_kernel), dim3(pblocks), dim3(pthreads), 0, 0, pin, depth, pBinFill + hostHashTableBounds.NBins, pBinFill, ppChainMapViewEntryPoints, pPairIndices);
/*
int *pwDEBUG = new int[totalpairs * 3];
int *pzDEBUG = new int[hostHashTableBounds.NBins];
hipMemcpy(pwDEBUG, pPairIndices, sizeof(int) * totalpairs * 3, hipMemcpyDeviceToHost);
hipMemcpy(pzDEBUG, pBinFill + hostHashTableBounds.NBins, sizeof(int) * hostHashTableBounds.NBins, hipMemcpyDeviceToHost);
for (int iii = 0; iii < 50; iii++)
{
printf("\n%d %08X %08X %08X", iii, pwDEBUG[iii * 3], pwDEBUG[iii * 3 + 1], pwDEBUG[iii * 3 + 2]);
}
for (int iii = 0; iii < 50; iii++)
if (pzDEBUG[iii])
{
printf("\n%d %08X", iii, pzDEBUG[iii]);
}
delete [] pzDEBUG;
delete [] pwDEBUG;
*/
hipLaunchKernelGGL(( find_track_singlepass_kernel), dim3(pblocks), dim3(pthreads), 0, 0, pPairIndices, totalpairs, ppChainMapViewEntryPoints, &pInternalInfo->C, pTBins, pTBinFill, ppTempTracks, pCountTempTracks, vs * vs, cm * cm, C.SlopeCenterX, C.SlopeCenterY, C.SlopeAcceptanceX, C.SlopeAcceptanceY,
/*isx, isy, isstep, isstep,*/ &pInternalInfo->H, minviewtag);
_CUDA_THROW_ERR_
}
#else
{
dim3 ithreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks((hostHashTableBounds.XBins * hostHashTableBounds.YBins) / ithreads.x + 1, 1, 1);
WISE_ALLOC(pSegFindTrackStatus, sizeof(_segmented_findtrack_kernel_status_) * iblocks.x * ithreads.x);
WISE_ALLOC(pScheduler, sizeof(int) * (1 + ithreads.x * iblocks.x) * 2);
hipLaunchKernelGGL(( reset_scheduler_kernel), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, pScheduler, ithreads.x * iblocks.x, 0);
hipLaunchKernelGGL(( find_tracks_skewreset_list_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pSegFindTrackStatus, pBinFill, ppChainMapViewEntryPoints, hostHashTableBounds.XBins, hostHashTableBounds.YBins, C.HashBinCapacity, pScheduler);
_CUDA_THROW_ERR_
int terminate;
int ln = 0;
do
{
hipLaunchKernelGGL(( find_tracks_skewslope_list_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pSegFindTrackStatus, ppChainMapViewEntryPoints, hostHashTableBounds.XBins, hostHashTableBounds.YBins, C.HashBinCapacity,
isx/*C.SlopeCenterX*/, isy/*C.SlopeCenterY*/, isstep/*C.SlopeAcceptanceX*/, isstep/*C.SlopeAcceptanceY*/,
C.MinLength, pScheduler);
_CUDA_THROW_ERR_
int ix = 0, iy = 0;
//for (ix = -1; ix <= 1; ix++)
// for (iy = -1; iy <= 1; iy++)
{
hipLaunchKernelGGL(( find_tracks_skewgrainseek_list_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pSegFindTrackStatus, ppChainMapViewEntryPoints, pBinFill, hostHashTableBounds.XBins, hostHashTableBounds.YBins, C.HashBinCapacity, C.XYTolerance, C.ZTolerance << Z_TO_XY_RESCALE_SHIFT, /*ix, iy, */pScheduler);
_CUDA_THROW_ERR_
}
/*
{
_segmented_findtrack_kernel_status_ *pXdebug = new _segmented_findtrack_kernel_status_[iblocks.x * ithreads.x];
hipMemcpy(pXdebug, pSegFindTrackStatus, sizeof(_segmented_findtrack_kernel_status_) * iblocks.x * ithreads.x, hipMemcpyDeviceToHost);
int ixi;
for (ixi = 0; ixi < iblocks.x * ithreads.x; ixi++)
if (pXdebug[ixi].SearchGrains)
seekgraintrials++;
delete [] pXdebug;
}
*/
hipLaunchKernelGGL(( find_tracks_skewchecktrack_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pSegFindTrackStatus, &pInternalInfo->C, pTBins, pTBinFill, ppTempTracks, pCountTempTracks, vs * vs, cm * cm, hostHashTableBounds.XTBins, hostHashTableBounds.YTBins, hostHashTableBounds.TBinCapacity, minviewtag, pScheduler);
_CUDA_THROW_ERR_
hipLaunchKernelGGL(( reset_scheduler_kernel), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, pScheduler + (1 + ithreads.x * iblocks.x), 0, 0);
hipLaunchKernelGGL(( find_tracks_skewincrement_list_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pSegFindTrackStatus, pBinFill, ppChainMapViewEntryPoints, hostHashTableBounds.XBins, hostHashTableBounds.YBins, C.HashBinCapacity, pScheduler, C.MinLength >> Z_TO_XY_RESCALE_SHIFT, pScheduler + (1 + iblocks.x * ithreads.x));
#if 1
{
int totallength = 0;
hipMemcpy(&totallength, pScheduler, sizeof(int), hipMemcpyDeviceToHost);
int totalcompactorsize = 1;
int compactorsize = 1;
int depth = 1;
while (compactorsize < totallength)
{
compactorsize <<= 1;
totalcompactorsize += compactorsize;
depth++;
}
WISE_ALLOC(pSchedulerCompactor, sizeof(int) * totalcompactorsize);
int d;
dim3 rthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 rblocks(compactorsize / rthreads.x + 1, 1, 1);
hipLaunchKernelGGL(( reset_compactor_kernel), dim3(rblocks), dim3(rthreads), 0, 0, pScheduler + (2 + ithreads.x * iblocks.x), pSchedulerCompactor, pScheduler);
int *pin = pSchedulerCompactor;
int *pout;
for (d = 0; d < depth; d++)
{
pout = pin + compactorsize;
dim3 cthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 cblocks((compactorsize >> 1) / cthreads.x + 1, 1, 1);
hipLaunchKernelGGL(( recursive_sum_kernel), dim3(cblocks), dim3(cthreads), 0, 0, pin, pout, compactorsize);
compactorsize >>= 1;
pin = pout;
}
hipLaunchKernelGGL(( compactor_find_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pin, depth, pScheduler + (2 + ithreads.x * iblocks.x), pScheduler + 1, pScheduler);
}
#else
hipLaunchKernelGGL(( compact_scheduler_kernel), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)), 0, 0, pScheduler + (1 + iblocks.x * ithreads.x), pScheduler);
THROW_ON_CUDA_ERR(hipMemcpy(pScheduler, pScheduler + (1 + iblocks.x * ithreads.x), sizeof(int) * (1 + iblocks.x * ithreads.x), hipMemcpyDeviceToDevice));
#endif
THROW_ON_CUDA_ERR(hipMemcpy(&terminate, pScheduler, sizeof(int), hipMemcpyDeviceToHost));
//printf("\nTerminate %d", terminate);
iblocks.x = terminate / ithreads.x + 1;
ln++;
}
while(terminate > 0);
int temptracks = 0;
THROW_ON_CUDA_ERR(hipMemcpy(&temptracks, pCountTempTracks, sizeof(int), hipMemcpyDeviceToHost));
// printf("\nLaunches: %d Slopes %d %d", ln, isx, isy);
printf("\nLaunches: %d Slopes %d %d %d", ln, isx, isy, temptracks - lasttemptracks);
lasttemptracks = temptracks;
}
#endif
}
int temptracks = 0;
THROW_ON_CUDA_ERR(hipMemcpy(&temptracks, pCountTempTracks, sizeof(int), hipMemcpyDeviceToHost));
printf("\nDEBUG TempTracks %d Seekgraintrials %d", temptracks, seekgraintrials);
#if 0
{
int *pwDEBUG = new int[hostHashTableBounds.NTBins];
hipMemcpy(pwDEBUG, pTBinFill, sizeof(int) * hostHashTableBounds.NTBins, hipMemcpyDeviceToHost);
int totaltemptracks = 0;
for (int i = 0; i < hostHashTableBounds.NTBins; i++)
{
// printf("\nDEBUG %d %d", i, pwDEBUG[i]);
totaltemptracks += pwDEBUG[i];
}
printf("\nTotal: %d", totaltemptracks);
delete [] pwDEBUG;
}
#endif
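	/* Merge pass: mergetracks_kernel is relaunched (its scheduler flag is reset to
	   0xffffffff before each pass) until the flag comes back non-zero; filltracks_kernel
	   then copies the merged temporary tracks into pTracks. */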
{
dim3 ithreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks(pThis->m_Prop.multiProcessorCount, 1, 1);
WISE_ALLOC(pScheduler, (1 + ithreads.x * iblocks.x) * sizeof(int));
THROW_ON_CUDA_ERR(hipMemset(pScheduler, 0, (1 + ithreads.x * iblocks.x) * sizeof(int)));
int terminate;
int ln = 0;
do
{
THROW_ON_CUDA_ERR(hipMemset(pScheduler, 0xff, sizeof(int)));
hipLaunchKernelGGL(( mergetracks_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pCountTempTracks, ppTempTracks, pTBinFill, pTBins, pTracks, C.MergeTrackXYTolerance, C.MergeTrackZTolerance << Z_TO_XY_RESCALE_SHIFT, hostHashTableBounds.XTBins, hostHashTableBounds.YTBins, hostHashTableBounds.TBinCapacity, pScheduler + 1, pScheduler);
_CUDA_THROW_ERR_
THROW_ON_CUDA_ERR(hipMemcpy(&terminate, pScheduler, sizeof(int), hipMemcpyDeviceToHost));
ln++;
}
while (terminate == 0);
printf("\nLaunches: %d", ln);
{
dim3 ithreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks(temptracks / ithreads.x + 1, 1, 1);
hipLaunchKernelGGL(( filltracks_kernel), dim3(iblocks), dim3(ithreads), 0, 0, pCountTempTracks, ppTempTracks, pTracks);
}
}
THROW_ON_CUDA_ERR(hipMemcpy(pHostTracks, pTracks, sizeof(TrackMapHeader), hipMemcpyDeviceToHost));
int size1 = pHostTracks->TrackSize();
int size2 = pHostTracks->TotalSize() - pHostTracks->TrackSize();
HOST_WISE_ALLOC(pHostTracks, size1 + size2);
THROW_ON_CUDA_ERR(hipMemcpy(pHostTracks, pTracks, size1, hipMemcpyDeviceToHost));
}
}
} | d926d50c3201f349dd738c72faa13e390a131c5d.cu | #include "gpu_incremental_map_track.h"
#include "gpu_track_kernels.h"
#include "gpu_defines.h"
#include "cuda_runtime.h"
#include <stdlib.h>
#include <memory.h>
#include <malloc.h>
namespace SySal
{
namespace GPU
{
PrismMapTracker::Tracker::Tracker() :
CTOR_INIT(pTracks),
CTOR_INIT(pHostTracks),
CTOR_INIT(pChainMapHeader),
CTOR_INIT(pInternalInfo),
CTOR_INIT(ppChainMapViewEntryPoints),
CTOR_INIT(pHashTable),
CTOR_INIT(pKernelContinue),
CTOR_INIT(pTrackGrains),
CTOR_INIT(pScheduler),
CTOR_INIT(pSchedulerCompactor),
CTOR_INIT(pPairComputer),
CTOR_INIT(pPairIndices),
CTOR_INIT(pSegFindTrackStatus),
CTOR_INIT(pBinFill),
CTOR_INIT(ppBins),
CTOR_INIT(pTBinFill),
CTOR_INIT(pCountTempTracks),
CTOR_INIT(pTBins),
CTOR_INIT(ppTempTracks),
_MergeTracksKernel_LoopLimiter_(0x7fffffff)
{
C.XYTolerance = 0.23 * GetXYScale();
C.ZTolerance = 1.0 * GetZScale();
C.ZThickness = 50 * GetZScale();
C.HashBinCapacity = 6;
C.XYHashTableBinSize = 20 * GetXYScale();
C.ZHashTableBinSize = 2 * GetZScale();
C.ZHashTableBins = 25;
C.MinLength = 20 * GetXYScale();
C.MaxLength = 60 * GetXYScale();
C.MaxTracks = 1000;
C.MinVolume = 30;
C.MinChainVolume = 10;
C.SlopeCenterX = 0;
C.SlopeCenterY = 0;
C.SlopeAcceptanceX = 0;
C.SlopeAcceptanceY = 0;
C.FilterVolumeLength0 = 40;
C.FilterVolumeLength100 = 140;
C.FilterChain0 = 4;
C.FilterChainMult = 4.0f;
C.MergeTrackCell = 150 * GetXYScale();
C.MergeTrackXYTolerance = 2 * GetXYScale();
C.MergeTrackZTolerance = 3 * GetZScale();
C.ClusterVol0 = 220;
C.ClusterVolM = 10;
}
PrismMapTracker::Tracker::~Tracker()
{
DEALLOC(ppTempTracks);
DEALLOC(pTBins);
DEALLOC(pCountTempTracks);
DEALLOC(pTBinFill);
DEALLOC(ppBins);
DEALLOC(pBinFill);
DEALLOC(pPairIndices);
DEALLOC(pPairComputer);
DEALLOC(pSchedulerCompactor);
DEALLOC(pScheduler);
DEALLOC(pSegFindTrackStatus);
DEALLOC(pTrackGrains);
DEALLOC(pKernelContinue);
DEALLOC(pHashTable);
DEALLOC(ppChainMapViewEntryPoints);
DEALLOC(pInternalInfo);
DEALLOC(pChainMapHeader);
DEALLOC(pTracks);
HOST_DEALLOC(pHostTracks);
}
void PrismMapTracker::Tracker::Reset(SySal::Tracker::Configuration &c)
{
cudaError_t err;
C = c;
THROW_ON_CUDA_ERR(cudaSetDevice(pThis->m_DeviceId));
HOST_WISE_ALLOC(pHostTracks, sizeof(TrackMapHeader));
pHostTracks->Count = 0;
pHostTracks->TotalGrains = 0;
memset(pHostTracks->Reserved, 0, sizeof(short) * 8);
WISE_ALLOC(pTracks, sizeof(TrackMapHeader) + C.MaxTracks * sizeof(IntTrack));
THROW_ON_CUDA_ERR(cudaMemcpy(pTracks, pHostTracks, sizeof(TrackMapHeader), cudaMemcpyHostToDevice));
EXACT_ALLOC(pKernelContinue, sizeof(int));
EXACT_ALLOC(pInternalInfo, sizeof(InternalInfo));
EXACT_ALLOC(pCountTempTracks, sizeof(int));
EXACT_ALLOC(ppChainMapViewEntryPoints, sizeof(ChainView *) * 2);
THROW_ON_CUDA_ERR(cudaMemcpy(&pInternalInfo->C, &C, sizeof(C), cudaMemcpyHostToDevice));
}
void PrismMapTracker::Tracker::InternalFindTracks(int minviewtag, int width, int height, ChainView *pLastView, ChainView *pThisView)
{
cudaError_t err;
THROW_ON_CUDA_ERR(cudaSetDevice(pThis->m_DeviceId));
HashTableBounds hostHashTableBounds;
WISE_ALLOC(pTracks, sizeof(TrackMapHeader) + C.MaxTracks * sizeof(IntTrack));
/* We explore the ChainMap here and define the bounds. */
{
dim3 ithreads(1,1,1);
dim3 iblocks(1,1,1);
explore_skewchainmap_kernel<<<iblocks, ithreads>>>(pLastView, pThisView, width, height, pInternalInfo, ppChainMapViewEntryPoints, pTracks);
}
THROW_ON_CUDA_ERR(cudaMemcpy(&hostHashTableBounds, &pInternalInfo->H, sizeof(HashTableBounds), cudaMemcpyDeviceToHost));
printf("\nDEBUG DUMP 1 %d", hostHashTableBounds.DEBUG1);
if (C.MaxTracks < hostHashTableBounds.NTBins * hostHashTableBounds.TBinCapacity)
{
C.MaxTracks = hostHashTableBounds.NTBins * hostHashTableBounds.TBinCapacity;
printf("\nMaxTracks corrected to %d", C.MaxTracks);
}
printf("\nHashTable Grid: %d %d %d (%d)\n%d %d %d", hostHashTableBounds.XBins, hostHashTableBounds.YBins, hostHashTableBounds.ZBins, hostHashTableBounds.NBins, hostHashTableBounds.XTBins, hostHashTableBounds.YTBins, hostHashTableBounds.TBinCapacity);
// WISE_ALLOC(pTrackGrains, sizeof(IntChain) * C.MaxTracks * _MAX_GRAINS_PER_TRACK_);
WISE_ALLOC(pBinFill, (sizeof(int) * hostHashTableBounds.NBins) * 2);
ppBins = 0;
WISE_ALLOC(pTBinFill, (sizeof(int) * hostHashTableBounds.NTBins));
cudaMemset(pCountTempTracks, 0, sizeof(int));
WISE_ALLOC(pTBins, (hostHashTableBounds.NTBins * hostHashTableBounds.TBinCapacity * sizeof(TempIntTrack)));
WISE_ALLOC(ppTempTracks, (sizeof(TempIntTrack *) * C.MaxTracks));
printf("\nDEBUG %d %d %d %d", hostHashTableBounds.NBins * sizeof(int), hostHashTableBounds.NTBins * sizeof(int), C.HashBinCapacity * hostHashTableBounds.NBins * sizeof(IntChain *), hostHashTableBounds.TBinCapacity * sizeof(TempIntTrack) * hostHashTableBounds.NTBins);
cudaMemset(pTBinFill, 0, sizeof(int) * (1 + hostHashTableBounds.NTBins));
_CUDA_THROW_ERR_
float vs = (((C.FilterVolumeLength100 - C.FilterVolumeLength0) * 0.01) / (1 << XY_SCALE_SHIFT));
float cm = C.FilterChainMult / (1 << SLOPE_SCALE_SHIFT);
printf("\nDEBUG VS: %f", vs);
printf("\nDEBUG V0: %d", C.FilterVolumeLength0);
long long seekgraintrials = 0;
int lasttemptracks = 0;
/* Fill bins. */
int isx, isy;
int isstep = ((int)(((C.XYHashTableBinSize >> XY_SCALE_SHIFT) << SLOPE_SCALE_SHIFT) / ((float)C.ZThickness / (float)GetZScale() )));
for (isx = C.SlopeCenterX - (C.SlopeAcceptanceX/isstep)*isstep; isx <= C.SlopeCenterX + (C.SlopeAcceptanceX/isstep)*isstep; isx += isstep)
for (isy = C.SlopeCenterY - (C.SlopeAcceptanceY/isstep)*isstep; isy <= C.SlopeCenterY + (C.SlopeAcceptanceY/isstep)*isstep; isy += isstep)
{
cudaMemset(pBinFill, 0, sizeof(int) * hostHashTableBounds.NBins * 2);
//cudaMemset(pBinFill + hostHashTableBounds.NBins * 2, 0x7E7E7E7E, sizeof(int) * hostHashTableBounds.NBins);
_CUDA_THROW_ERR_
{
dim3 iblocks(pThis->m_Prop.multiProcessorCount /*ChHdr.Views*/, 1/*ChHdr.Views*/, 1);
dim3 ithreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
//fill_skewhashtable_list_kernel<<<iblocks, ithreads>>>(pBinFill, ppChainMapViewEntryPoints, &pInternalInfo->H, isx, isy, C.MinChainVolume);
fill_skewhashtable1view_list_kernel<<<iblocks, ithreads>>>(pBinFill, ppChainMapViewEntryPoints, &pInternalInfo->H, isx, isy, C.MinChainVolume, 0);
fill_skewhashtable1view_list_kernel<<<iblocks, ithreads>>>(pBinFill, ppChainMapViewEntryPoints, &pInternalInfo->H, isx, isy, C.MinChainVolume, 1);
_CUDA_THROW_ERR_
}
#if 1
{
int totalpaircomputersize = 1;
int paircomputersize = 1;
int depth = 1;
while (paircomputersize < hostHashTableBounds.NBins)
{
paircomputersize <<= 1;
totalpaircomputersize += paircomputersize;
depth++;
}
WISE_ALLOC(pPairComputer, sizeof(int) * totalpaircomputersize);
dim3 pcthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 pcblocks(totalpaircomputersize / pcthreads.x + 1, 1, 1);
//compute_pairs_kernel<<<pcblocks, pcthreads>>>(pBinFill + hostHashTableBounds.NBins, pBinFill + 2 * hostHashTableBounds.NBins, C.FilterChain0, pPairComputer, hostHashTableBounds.NBins);
//compute_pairs_kernel<<<pcblocks, pcthreads>>>(pBinFill + hostHashTableBounds.NBins, pPairComputer, hostHashTableBounds.NBins);
//compute_pairs_kernel<<<pcblocks, pcthreads>>>(pBinFill + hostHashTableBounds.NBins, pBinFill + 2 * hostHashTableBounds.NBins, minviewtag, pPairComputer, hostHashTableBounds.NBins);
compute_pairs1v_kernel<<<pcblocks, pcthreads>>>(pBinFill + hostHashTableBounds.NBins, pBinFill + 2 * hostHashTableBounds.NBins, minviewtag, pPairComputer, hostHashTableBounds.NBins);
/*
int *pwDEBUG = new int[hostHashTableBounds.NBins];
int *pzDEBUG = new int[hostHashTableBounds.NBins];
cudaMemcpy(pwDEBUG, pBinFill + hostHashTableBounds.NBins, sizeof(int) * hostHashTableBounds.NBins, cudaMemcpyDeviceToHost);
cudaMemcpy(pzDEBUG, pPairComputer, sizeof(int) * hostHashTableBounds.NBins, cudaMemcpyDeviceToHost);
for (int iii = 0; iii < 100; iii++)
if (pwDEBUG[iii])
{
printf("\n%d %08X %d", iii, pwDEBUG[iii], pzDEBUG[iii]);
}
delete [] pzDEBUG;
delete [] pwDEBUG;
*/
int d;
int *pin = pPairComputer;
int *pout;
for (d = 0; d < depth; d++)
{
pout = pin + paircomputersize;
dim3 cthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 cblocks((paircomputersize >> 1) / cthreads.x + 1, 1, 1);
recursive_sum_kernel<<<cblocks, cthreads>>>(pin, pout, paircomputersize);
paircomputersize >>= 1;
pin = pout;
}
int totalpairs = 0;
cudaMemcpy(&totalpairs, pin, sizeof(int), cudaMemcpyDeviceToHost);
//printf("\nDEBUG-TOTALPAIRS %d", totalpairs);
WISE_ALLOC(pPairIndices, sizeof(int) * 3 * totalpairs);
cudaMemset(pPairIndices, 0, sizeof(int) * 3 * totalpairs);
dim3 pthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 pblocks((totalpairs / pthreads.x) + 1, 1, 1);
//pair_find_kernel<<<pblocks, pthreads>>>(pin, depth, pBinFill + hostHashTableBounds.NBins, pBinFill, ppChainMapViewEntryPoints, pPairIndices);
pair_find1v_kernel<<<pblocks, pthreads>>>(pin, depth, pBinFill + hostHashTableBounds.NBins, pBinFill, ppChainMapViewEntryPoints, pPairIndices);
/*
int *pwDEBUG = new int[totalpairs * 3];
int *pzDEBUG = new int[hostHashTableBounds.NBins];
cudaMemcpy(pwDEBUG, pPairIndices, sizeof(int) * totalpairs * 3, cudaMemcpyDeviceToHost);
cudaMemcpy(pzDEBUG, pBinFill + hostHashTableBounds.NBins, sizeof(int) * hostHashTableBounds.NBins, cudaMemcpyDeviceToHost);
for (int iii = 0; iii < 50; iii++)
{
printf("\n%d %08X %08X %08X", iii, pwDEBUG[iii * 3], pwDEBUG[iii * 3 + 1], pwDEBUG[iii * 3 + 2]);
}
for (int iii = 0; iii < 50; iii++)
if (pzDEBUG[iii])
{
printf("\n%d %08X", iii, pzDEBUG[iii]);
}
delete [] pzDEBUG;
delete [] pwDEBUG;
*/
find_track_singlepass_kernel<<<pblocks, pthreads>>>(pPairIndices, totalpairs, ppChainMapViewEntryPoints, &pInternalInfo->C, pTBins, pTBinFill, ppTempTracks, pCountTempTracks, vs * vs, cm * cm, C.SlopeCenterX, C.SlopeCenterY, C.SlopeAcceptanceX, C.SlopeAcceptanceY,
/*isx, isy, isstep, isstep,*/ &pInternalInfo->H, minviewtag);
_CUDA_THROW_ERR_
}
#else
{
dim3 ithreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks((hostHashTableBounds.XBins * hostHashTableBounds.YBins) / ithreads.x + 1, 1, 1);
WISE_ALLOC(pSegFindTrackStatus, sizeof(_segmented_findtrack_kernel_status_) * iblocks.x * ithreads.x);
WISE_ALLOC(pScheduler, sizeof(int) * (1 + ithreads.x * iblocks.x) * 2);
reset_scheduler_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pScheduler, ithreads.x * iblocks.x, 0);
find_tracks_skewreset_list_kernel<<<iblocks, ithreads>>>(pSegFindTrackStatus, pBinFill, ppChainMapViewEntryPoints, hostHashTableBounds.XBins, hostHashTableBounds.YBins, C.HashBinCapacity, pScheduler);
_CUDA_THROW_ERR_
int terminate;
int ln = 0;
do
{
find_tracks_skewslope_list_kernel<<<iblocks, ithreads>>>(pSegFindTrackStatus, ppChainMapViewEntryPoints, hostHashTableBounds.XBins, hostHashTableBounds.YBins, C.HashBinCapacity,
isx/*C.SlopeCenterX*/, isy/*C.SlopeCenterY*/, isstep/*C.SlopeAcceptanceX*/, isstep/*C.SlopeAcceptanceY*/,
C.MinLength, pScheduler);
_CUDA_THROW_ERR_
int ix = 0, iy = 0;
//for (ix = -1; ix <= 1; ix++)
// for (iy = -1; iy <= 1; iy++)
{
find_tracks_skewgrainseek_list_kernel<<<iblocks, ithreads>>>(pSegFindTrackStatus, ppChainMapViewEntryPoints, pBinFill, hostHashTableBounds.XBins, hostHashTableBounds.YBins, C.HashBinCapacity, C.XYTolerance, C.ZTolerance << Z_TO_XY_RESCALE_SHIFT, /*ix, iy, */pScheduler);
_CUDA_THROW_ERR_
}
/*
{
_segmented_findtrack_kernel_status_ *pXdebug = new _segmented_findtrack_kernel_status_[iblocks.x * ithreads.x];
cudaMemcpy(pXdebug, pSegFindTrackStatus, sizeof(_segmented_findtrack_kernel_status_) * iblocks.x * ithreads.x, cudaMemcpyDeviceToHost);
int ixi;
for (ixi = 0; ixi < iblocks.x * ithreads.x; ixi++)
if (pXdebug[ixi].SearchGrains)
seekgraintrials++;
delete [] pXdebug;
}
*/
find_tracks_skewchecktrack_kernel<<<iblocks, ithreads>>>(pSegFindTrackStatus, &pInternalInfo->C, pTBins, pTBinFill, ppTempTracks, pCountTempTracks, vs * vs, cm * cm, hostHashTableBounds.XTBins, hostHashTableBounds.YTBins, hostHashTableBounds.TBinCapacity, minviewtag, pScheduler);
_CUDA_THROW_ERR_
reset_scheduler_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pScheduler + (1 + ithreads.x * iblocks.x), 0, 0);
find_tracks_skewincrement_list_kernel<<<iblocks, ithreads>>>(pSegFindTrackStatus, pBinFill, ppChainMapViewEntryPoints, hostHashTableBounds.XBins, hostHashTableBounds.YBins, C.HashBinCapacity, pScheduler, C.MinLength >> Z_TO_XY_RESCALE_SHIFT, pScheduler + (1 + iblocks.x * ithreads.x));
#if 1
{
int totallength = 0;
cudaMemcpy(&totallength, pScheduler, sizeof(int), cudaMemcpyDeviceToHost);
int totalcompactorsize = 1;
int compactorsize = 1;
int depth = 1;
while (compactorsize < totallength)
{
compactorsize <<= 1;
totalcompactorsize += compactorsize;
depth++;
}
WISE_ALLOC(pSchedulerCompactor, sizeof(int) * totalcompactorsize);
int d;
dim3 rthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 rblocks(compactorsize / rthreads.x + 1, 1, 1);
reset_compactor_kernel<<<rblocks, rthreads>>>(pScheduler + (2 + ithreads.x * iblocks.x), pSchedulerCompactor, pScheduler);
int *pin = pSchedulerCompactor;
int *pout;
for (d = 0; d < depth; d++)
{
pout = pin + compactorsize;
dim3 cthreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 cblocks((compactorsize >> 1) / cthreads.x + 1, 1, 1);
recursive_sum_kernel<<<cblocks, cthreads>>>(pin, pout, compactorsize);
compactorsize >>= 1;
pin = pout;
}
compactor_find_kernel<<<iblocks, ithreads>>>(pin, depth, pScheduler + (2 + ithreads.x * iblocks.x), pScheduler + 1, pScheduler);
}
#else
compact_scheduler_kernel<<<dim3(1,1,1), dim3(1,1,1)>>>(pScheduler + (1 + iblocks.x * ithreads.x), pScheduler);
THROW_ON_CUDA_ERR(cudaMemcpy(pScheduler, pScheduler + (1 + iblocks.x * ithreads.x), sizeof(int) * (1 + iblocks.x * ithreads.x), cudaMemcpyDeviceToDevice));
#endif
THROW_ON_CUDA_ERR(cudaMemcpy(&terminate, pScheduler, sizeof(int), cudaMemcpyDeviceToHost));
//printf("\nTerminate %d", terminate);
iblocks.x = terminate / ithreads.x + 1;
ln++;
}
while(terminate > 0);
int temptracks = 0;
THROW_ON_CUDA_ERR(cudaMemcpy(&temptracks, pCountTempTracks, sizeof(int), cudaMemcpyDeviceToHost));
// printf("\nLaunches: %d Slopes %d %d", ln, isx, isy);
printf("\nLaunches: %d Slopes %d %d %d", ln, isx, isy, temptracks - lasttemptracks);
lasttemptracks = temptracks;
}
#endif
}
int temptracks = 0;
THROW_ON_CUDA_ERR(cudaMemcpy(&temptracks, pCountTempTracks, sizeof(int), cudaMemcpyDeviceToHost));
printf("\nDEBUG TempTracks %d Seekgraintrials %d", temptracks, seekgraintrials);
#if 0
{
int *pwDEBUG = new int[hostHashTableBounds.NTBins];
cudaMemcpy(pwDEBUG, pTBinFill, sizeof(int) * hostHashTableBounds.NTBins, cudaMemcpyDeviceToHost);
int totaltemptracks = 0;
for (int i = 0; i < hostHashTableBounds.NTBins; i++)
{
// printf("\nDEBUG %d %d", i, pwDEBUG[i]);
totaltemptracks += pwDEBUG[i];
}
printf("\nTotal: %d", totaltemptracks);
delete [] pwDEBUG;
}
#endif
{
dim3 ithreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks(pThis->m_Prop.multiProcessorCount, 1, 1);
WISE_ALLOC(pScheduler, (1 + ithreads.x * iblocks.x) * sizeof(int));
THROW_ON_CUDA_ERR(cudaMemset(pScheduler, 0, (1 + ithreads.x * iblocks.x) * sizeof(int)));
int terminate;
int ln = 0;
do
{
THROW_ON_CUDA_ERR(cudaMemset(pScheduler, 0xff, sizeof(int)));
mergetracks_kernel<<<iblocks, ithreads>>>(pCountTempTracks, ppTempTracks, pTBinFill, pTBins, pTracks, C.MergeTrackXYTolerance, C.MergeTrackZTolerance << Z_TO_XY_RESCALE_SHIFT, hostHashTableBounds.XTBins, hostHashTableBounds.YTBins, hostHashTableBounds.TBinCapacity, pScheduler + 1, pScheduler);
_CUDA_THROW_ERR_
THROW_ON_CUDA_ERR(cudaMemcpy(&terminate, pScheduler, sizeof(int), cudaMemcpyDeviceToHost));
ln++;
}
while (terminate == 0);
printf("\nLaunches: %d", ln);
{
dim3 ithreads(pThis->m_Prop.maxThreadsPerBlock, 1, 1);
dim3 iblocks(temptracks / ithreads.x + 1, 1, 1);
filltracks_kernel<<<iblocks, ithreads>>>(pCountTempTracks, ppTempTracks, pTracks);
}
}
THROW_ON_CUDA_ERR(cudaMemcpy(pHostTracks, pTracks, sizeof(TrackMapHeader), cudaMemcpyDeviceToHost));
int size1 = pHostTracks->TrackSize();
int size2 = pHostTracks->TotalSize() - pHostTracks->TrackSize();
HOST_WISE_ALLOC(pHostTracks, size1 + size2);
THROW_ON_CUDA_ERR(cudaMemcpy(pHostTracks, pTracks, size1, cudaMemcpyDeviceToHost));
}
}
} |
cad5ad37afc824b57c0ede3aa1371542b01e5e93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#define LEN 256
#define TILESZ 16
// Uncomment this line if you want to display //
// the result of the computation. //
// #define DISPLAY 1
static double CLOCK();
__global__ void matInit(float*);
__global__ void stencil(float*, float*);
__global__ void stencil_tiled(float*, float*);
int main(int argc, char** argv) {
float *a, *a_host, *b;
a_host = (float*) malloc(sizeof(float) * LEN*LEN*LEN);
hipMalloc(&a, sizeof(float) * LEN*LEN*LEN);
hipMalloc(&b, sizeof(float) * LEN*LEN*LEN);
hipMemset(a, 0, sizeof(float) * LEN*LEN*LEN);
hipMemset(b, 0, sizeof(float) * LEN*LEN*LEN);
dim3 Grid, Block;
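    // TILED build: one block per (x plane, y-tile, z-tile) with TILESZ threads each;
    // untiled build: one block per x plane with LEN threads.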
#ifdef TILED
Grid = dim3(LEN, LEN/TILESZ, LEN/TILESZ);
// Block = dim3(TILESZ, TILESZ);
Block = dim3(TILESZ);
#else
Grid = dim3(LEN);
Block = dim3(LEN);
#endif // ifdef TILED
////////////////////////////
// Initialize matrix b //
////////////////////////////
hipLaunchKernelGGL(( matInit), dim3(LEN), dim3(LEN), 0, 0, b);
////////////////////////////
// stencil computation //
////////////////////////////
double start, end;
start = CLOCK();
#ifdef TILED
hipLaunchKernelGGL(( stencil_tiled), dim3(Grid), dim3(Block), 0, 0, a, b);
#else
hipLaunchKernelGGL(( stencil), dim3(Grid), dim3(Block), 0, 0, a, b);
#endif // #ifdef TILED
hipDeviceSynchronize();
end = CLOCK();
/////////////////////////
// Display the result //
/////////////////////////
#ifdef DISPLAY
hipMemcpy(a_host, a, sizeof(float) * LEN*LEN*LEN, hipMemcpyDeviceToHost);
for (int i=0; i<LEN; ++i)
for (int j=0; j<LEN; ++j)
for (int k=0; k<LEN; ++k) {
printf("(i=%d, j=%d, k=%d) = %.2f\n",
i, j, k, a_host[i*LEN*LEN+j*LEN+k]);
}
#endif // DISPLAY
#ifdef TILED
printf("stencil-tiled took %.2f ms\n", end-start);
#else
printf("stencil took %.2f ms\n", end-start);
#endif // #ifdef TILED
return 0;
}
__global__ void
matInit(float* mat) {
int i = blockIdx.x; // int M = gridDim.x;
int j = threadIdx.x; int N = blockDim.x;
int L = LEN;
for (int k=0; k<L; ++k) {
mat[i*N*L + j*L + k] = i*N*L + j*L +k;
}
}
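// 6-neighbour stencil: each thread owns one interior (x, y) column and, for every
// interior z, writes 0.8 times the sum of the six face neighbours read from b.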
__global__ void
stencil(float *a, float *b) {
int x = blockIdx.x, X = gridDim.x,
y = threadIdx.x, Y = gridDim.x,
Z = Y;
int tId = x*Y + y;
if ((x > 0 && x < X-1) &&
(y > 0 && y < Y-1)) {
for (int z = 1; z<Z-1; ++z) {
float b1 = b[(x-1)*Y*Z + y*Z + z],
b2 = b[(x+1)*Y*Z + y*Z + z],
b3 = b[x*Y*Z + (y-1)*Z + z],
b4 = b[x*Y*Z + (y+1)*Z + z],
b5 = b[x*Y*Z + y*Z + (z-1)],
b6 = b[x*Y*Z + y*Z + (z+1)];
a[tId*Z + z] = 0.8*(b1+b2+b3+b4+b5+b6);
}
}
}
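// Tiled variant: blockIdx.x selects the x plane, blockIdx.y/z select a TILESZ-wide
// tile along y and z; threadIdx.x walks y inside the tile and the t loop walks z.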
__global__ void
stencil_tiled(float *a, float *b) {
int x = blockIdx.x, X = gridDim.x,
y = blockIdx.y, Y = gridDim.y,
z = blockIdx.z, Z = gridDim.z,
s = threadIdx.x, S = blockDim.x,
T = S;
int tId = x*Y*Z*S + y*Z*S +s*Z + z;
if ((x > 0 && x < X-1) &&
(y != 0 || s != 0) && (y != Y-1 || s != S-1))
for (int t=0; t<T; ++t)
if ((z != 0 || t != 0) && (z != Z-1 || t != T-1)) {
float b1 = b[(x-1)*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t],
b2 = b[(x+1)*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t],
b3 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t - (T*Z)],
b4 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t + (T*Z)],
b5 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + (t-1)],
b6 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + (t+1)];
a[tId*T+t] = 0.8*(b1+b2+b3+b4+b5+b6);
}
}
double CLOCK() {
struct timespec t = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &t);
return (double) (t.tv_sec*1.0e3 + t.tv_nsec*1.0e-6);
}
| cad5ad37afc824b57c0ede3aa1371542b01e5e93.cu | #include <stdio.h>
#include <time.h>
#define LEN 256
#define TILESZ 16
// Uncomment this line if you want to display //
// the result of the computation. //
// #define DISPLAY 1
static double CLOCK();
__global__ void matInit(float*);
__global__ void stencil(float*, float*);
__global__ void stencil_tiled(float*, float*);
int main(int argc, char** argv) {
float *a, *a_host, *b;
a_host = (float*) malloc(sizeof(float) * LEN*LEN*LEN);
cudaMalloc(&a, sizeof(float) * LEN*LEN*LEN);
cudaMalloc(&b, sizeof(float) * LEN*LEN*LEN);
cudaMemset(a, 0, sizeof(float) * LEN*LEN*LEN);
cudaMemset(b, 0, sizeof(float) * LEN*LEN*LEN);
dim3 Grid, Block;
#ifdef TILED
Grid = dim3(LEN, LEN/TILESZ, LEN/TILESZ);
// Block = dim3(TILESZ, TILESZ);
Block = dim3(TILESZ);
#else
Grid = dim3(LEN);
Block = dim3(LEN);
#endif // ifdef TILED
////////////////////////////
// Initialize matrix b //
////////////////////////////
matInit<<<LEN, LEN>>>(b);
////////////////////////////
// stencil computation //
////////////////////////////
double start, end;
start = CLOCK();
#ifdef TILED
stencil_tiled<<<Grid, Block>>>(a, b);
#else
stencil<<<Grid, Block>>>(a, b);
#endif // #ifdef TILED
cudaDeviceSynchronize();
end = CLOCK();
/////////////////////////
// Display the result //
/////////////////////////
#ifdef DISPLAY
cudaMemcpy(a_host, a, sizeof(float) * LEN*LEN*LEN, cudaMemcpyDeviceToHost);
for (int i=0; i<LEN; ++i)
for (int j=0; j<LEN; ++j)
for (int k=0; k<LEN; ++k) {
printf("(i=%d, j=%d, k=%d) = %.2f\n",
i, j, k, a_host[i*LEN*LEN+j*LEN+k]);
}
#endif // DISPLAY
#ifdef TILED
printf("stencil-tiled took %.2f ms\n", end-start);
#else
printf("stencil took %.2f ms\n", end-start);
#endif // #ifdef TILED
return 0;
}
__global__ void
matInit(float* mat) {
int i = blockIdx.x; // int M = gridDim.x;
int j = threadIdx.x; int N = blockDim.x;
int L = LEN;
for (int k=0; k<L; ++k) {
mat[i*N*L + j*L + k] = i*N*L + j*L +k;
}
}
__global__ void
stencil(float *a, float *b) {
int x = blockIdx.x, X = gridDim.x,
y = threadIdx.x, Y = gridDim.x,
Z = Y;
int tId = x*Y + y;
if ((x > 0 && x < X-1) &&
(y > 0 && y < Y-1)) {
for (int z = 1; z<Z-1; ++z) {
float b1 = b[(x-1)*Y*Z + y*Z + z],
b2 = b[(x+1)*Y*Z + y*Z + z],
b3 = b[x*Y*Z + (y-1)*Z + z],
b4 = b[x*Y*Z + (y+1)*Z + z],
b5 = b[x*Y*Z + y*Z + (z-1)],
b6 = b[x*Y*Z + y*Z + (z+1)];
a[tId*Z + z] = 0.8*(b1+b2+b3+b4+b5+b6);
}
}
}
__global__ void
stencil_tiled(float *a, float *b) {
int x = blockIdx.x, X = gridDim.x,
y = blockIdx.y, Y = gridDim.y,
z = blockIdx.z, Z = gridDim.z,
s = threadIdx.x, S = blockDim.x,
T = S;
int tId = x*Y*Z*S + y*Z*S +s*Z + z;
if ((x > 0 && x < X-1) &&
(y != 0 || s != 0) && (y != Y-1 || s != S-1))
for (int t=0; t<T; ++t)
if ((z != 0 || t != 0) && (z != Z-1 || t != T-1)) {
float b1 = b[(x-1)*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t],
b2 = b[(x+1)*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t],
b3 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t - (T*Z)],
b4 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t + (T*Z)],
b5 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + (t-1)],
b6 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + (t+1)];
a[tId*T+t] = 0.8*(b1+b2+b3+b4+b5+b6);
}
}
double CLOCK() {
struct timespec t = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &t);
return (double) (t.tv_sec*1.0e3 + t.tv_nsec*1.0e-6);
}
|
8889032cc743f9e0af81324a0dd05758db0e4598.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include<stdlib.h>
#include<cuda.h>
int main( void ) {
hipDeviceProp_t prop;
int count;
hipGetDeviceCount( &count);
printf("found %d devices\n", count);
for (int i=0; i< count; i++) {
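    // Dump the general, memory and multiprocessor properties of each device.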
hipGetDeviceProperties( &prop, i);
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Mapped page-locked host memory : " );
if (prop.canMapHostMemory)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
| 8889032cc743f9e0af81324a0dd05758db0e4598.cu | #include <stdio.h>
#include<stdlib.h>
#include<cuda.h>
int main( void ) {
cudaDeviceProp prop;
int count;
cudaGetDeviceCount( &count);
printf("found %d devices\n", count);
for (int i=0; i< count; i++) {
cudaGetDeviceProperties( &prop, i);
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Mapped page-locked host memory : " );
if (prop.canMapHostMemory)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
|
2c9039f2186e406745dfebef7a01a198d47ea8cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/conv_dw_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
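// Depthwise forward: each output element (n, c, h, w) is the dot product of channel
// c's kernel_h x kernel_w filter with the matching input window, honouring stride,
// padding and dilation; taps that fall outside the input are skipped.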
template <typename Dtype>
__global__ void ConvolutionDepthwiseWeightForward(const int nthreads,
const Dtype* const bottom_data, const Dtype* const weight_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / channels / top_height / top_width;
const int c = (index / top_height / top_width) % channels;
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const Dtype* weight = weight_data + c * kernel_h * kernel_w;
Dtype value = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h_in = -pad_h + h * stride_h + kh * dilation_h;
const int w_in = -pad_w + w * stride_w + kw * dilation_w;
if ((h_in >= 0) && (h_in < bottom_height)
&& (w_in >= 0) && (w_in < bottom_width)) {
const int offset = ((n * channels + c) * bottom_height + h_in)
* bottom_width + w_in;
value += (*weight) * bottom_data[offset];
}
++weight;
}
}
top_data[index] = value;
}
}
template <typename Dtype>
__global__ void ConvolutionDepthwiseBiasForward(const int nthreads,
const Dtype* const bias_data, const int num, const int channels,
const int top_height, const int top_width, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = (index / top_height / top_width) % channels;
top_data[index] += bias_data[c];
}
}
template <typename Dtype>
void ConvolutionDepthwiseLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight_data = this->blobs_[0]->gpu_data();
const int count = top[0]->count();
const int num = top[0]->num();
const int channels = top[0]->channels();
const int top_height = top[0]->height();
const int top_width = top[0]->width();
const int bottom_height = bottom[0]->height();
const int bottom_width = bottom[0]->width();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(ConvolutionDepthwiseWeightForward<Dtype>,
      dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, weight_data, num, channels,
      top_height, top_width, bottom_height, bottom_width,
      kernel_h_, kernel_w_, stride_h_, stride_w_,
      pad_h_, pad_w_, dilation_h_, dilation_w_, top_data);
if (this->layer_param_.convolution_param().bias_term()) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(ConvolutionDepthwiseBiasForward<Dtype>,
        dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bias_data, num, channels,
        top_height, top_width, top_data);
}
}
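// Weight backward: each thread writes one top_diff * bottom_data product into
// buffer_data laid out as (c, kh, kw, n, h, w); Backward_gpu then reduces the
// (n, h, w) axis with caffe_gpu_gemv against a ones vector to obtain weight_diff.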
template <typename Dtype>
__global__ void ConvolutionDepthwiseWeightBackward(const int nthreads,
const Dtype* const top_diff, const Dtype* const bottom_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Dtype* const buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const int kh = (index / kernel_w / num / top_height / top_width)
% kernel_h;
const int kw = (index / num / top_height / top_width) % kernel_w;
const int h_in = -pad_h + h * stride_h + kh * dilation_h;
const int w_in = -pad_w + w * stride_w + kw * dilation_w;
if ((h_in >= 0) && (h_in < bottom_height)
&& (w_in >= 0) && (w_in < bottom_width)) {
const int c = index / kernel_h / kernel_w / num / top_height / top_width;
const int n = (index / top_height / top_width) % num;
const int top_offset = ((n * channels + c) * top_height + h)
* top_width + w;
const int bottom_offset = ((n * channels + c) * bottom_height + h_in)
* bottom_width + w_in;
buffer_data[index] = top_diff[top_offset] * bottom_data[bottom_offset];
} else {
buffer_data[index] = 0;
}
}
}
template <typename Dtype>
__global__ void ConvolutionDepthwiseBottomBackward(const int nthreads,
const Dtype* const top_diff, const Dtype* const weight_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / channels / bottom_height / bottom_width;
const int c = (index / bottom_height / bottom_width) % channels;
const int h = (index / bottom_width) % bottom_height;
const int w = index % bottom_width;
const Dtype* weight = weight_data + c * kernel_h * kernel_w;
Dtype value = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h_out_s = h + pad_h - kh * dilation_h;
const int w_out_s = w + pad_w - kw * dilation_w;
if (((h_out_s % stride_h) == 0) && ((w_out_s % stride_w) == 0)) {
const int h_out = h_out_s / stride_h;
const int w_out = w_out_s / stride_w;
if ((h_out >= 0) && (h_out < top_height)
&& (w_out >= 0) && (w_out < top_width)) {
const int offset = ((n * channels + c) * top_height + h_out)
* top_width + w_out;
value += (*weight) * top_diff[offset];
}
}
++weight;
}
}
bottom_diff[index] += value;
}
}
template <typename Dtype>
__global__ void ConvolutionDepthwiseBiasBackward(const int nthreads,
const Dtype* const top_diff, const int num, const int channels,
const int top_height, const int top_width, Dtype* const buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = index / num / top_height / top_width;
const int n = (index / top_height / top_width) % num;
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const int offset = ((n * channels + c) * top_height + h) * top_width + w;
buffer_data[index] = top_diff[offset];
}
}
template <typename Dtype>
void ConvolutionDepthwiseLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const int bottom_count = bottom[0]->count();
const int num = top[0]->num();
const int channels = top[0]->channels();
const int top_height = top[0]->height();
const int top_width = top[0]->width();
const int bottom_height = bottom[0]->height();
const int bottom_width = bottom[0]->width();
const int length = num * top_height * top_width;
caffe_gpu_set(bottom_count, Dtype(0), bottom[0]->mutable_gpu_diff());
if (this->layer_param_.convolution_param().bias_term()
&& this->param_propagate_down_[1]) {
const int bias_buffer_count = bias_buffer_.count();
Dtype* bias_buffer_mutable_data = bias_buffer_.mutable_gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(ConvolutionDepthwiseBiasBackward<Dtype>,
        dim3(CAFFE_GET_BLOCKS(bias_buffer_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        bias_buffer_count, top_diff, num, channels,
        top_height, top_width, bias_buffer_mutable_data);
const int bias_count = this->blobs_[1]->count();
const Dtype* bias_buffer_data = bias_buffer_.gpu_data();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
const Dtype* bias_multiplier_data = bias_multiplier_.gpu_data();
caffe_gpu_gemv(CblasNoTrans, bias_count, length, Dtype(1),
bias_buffer_data, bias_multiplier_data, Dtype(1), bias_diff);
}
if (this->param_propagate_down_[0]) {
const int weight_buffer_count = weight_buffer_.count();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* weight_buffer_mutable_data = weight_buffer_.mutable_gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(ConvolutionDepthwiseWeightBackward<Dtype>,
        dim3(CAFFE_GET_BLOCKS(weight_buffer_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        weight_buffer_count, top_diff, bottom_data, num, channels,
        top_height, top_width, bottom_height, bottom_width,
        kernel_h_, kernel_w_, stride_h_, stride_w_,
        pad_h_, pad_w_, dilation_h_, dilation_w_, weight_buffer_mutable_data);
const int weight_count = this->blobs_[0]->count();
const Dtype* weight_buffer_data = weight_buffer_.gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* weight_multiplier_data = weight_multiplier_.gpu_data();
caffe_gpu_gemv(CblasNoTrans, weight_count, length, Dtype(1),
weight_buffer_data, weight_multiplier_data, Dtype(1), weight_diff);
}
if (propagate_down[0]) {
const Dtype* weight_data = this->blobs_[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(ConvolutionDepthwiseBottomBackward<Dtype>,
        dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        bottom_count, top_diff, weight_data, num, channels,
        top_height, top_width, bottom_height, bottom_width,
        kernel_h_, kernel_w_, stride_h_, stride_w_,
        pad_h_, pad_w_, dilation_h_, dilation_w_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionDepthwiseLayer);
} // namespace caffe
| 2c9039f2186e406745dfebef7a01a198d47ea8cc.cu | #include <vector>
#include "caffe/layers/conv_dw_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <typename Dtype>
__global__ void ConvolutionDepthwiseWeightForward(const int nthreads,
const Dtype* const bottom_data, const Dtype* const weight_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / channels / top_height / top_width;
const int c = (index / top_height / top_width) % channels;
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const Dtype* weight = weight_data + c * kernel_h * kernel_w;
Dtype value = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h_in = -pad_h + h * stride_h + kh * dilation_h;
const int w_in = -pad_w + w * stride_w + kw * dilation_w;
if ((h_in >= 0) && (h_in < bottom_height)
&& (w_in >= 0) && (w_in < bottom_width)) {
const int offset = ((n * channels + c) * bottom_height + h_in)
* bottom_width + w_in;
value += (*weight) * bottom_data[offset];
}
++weight;
}
}
top_data[index] = value;
}
}
template <typename Dtype>
__global__ void ConvolutionDepthwiseBiasForward(const int nthreads,
const Dtype* const bias_data, const int num, const int channels,
const int top_height, const int top_width, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = (index / top_height / top_width) % channels;
top_data[index] += bias_data[c];
}
}
template <typename Dtype>
void ConvolutionDepthwiseLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight_data = this->blobs_[0]->gpu_data();
const int count = top[0]->count();
const int num = top[0]->num();
const int channels = top[0]->channels();
const int top_height = top[0]->height();
const int top_width = top[0]->width();
const int bottom_height = bottom[0]->height();
const int bottom_width = bottom[0]->width();
ConvolutionDepthwiseWeightForward<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, weight_data, num, channels,
top_height, top_width, bottom_height, bottom_width,
kernel_h_, kernel_w_, stride_h_, stride_w_,
pad_h_, pad_w_, dilation_h_, dilation_w_, top_data);
if (this->layer_param_.convolution_param().bias_term()) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
ConvolutionDepthwiseBiasForward<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bias_data, num, channels,
top_height, top_width, top_data);
}
}
template <typename Dtype>
__global__ void ConvolutionDepthwiseWeightBackward(const int nthreads,
const Dtype* const top_diff, const Dtype* const bottom_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Dtype* const buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const int kh = (index / kernel_w / num / top_height / top_width)
% kernel_h;
const int kw = (index / num / top_height / top_width) % kernel_w;
const int h_in = -pad_h + h * stride_h + kh * dilation_h;
const int w_in = -pad_w + w * stride_w + kw * dilation_w;
if ((h_in >= 0) && (h_in < bottom_height)
&& (w_in >= 0) && (w_in < bottom_width)) {
const int c = index / kernel_h / kernel_w / num / top_height / top_width;
const int n = (index / top_height / top_width) % num;
const int top_offset = ((n * channels + c) * top_height + h)
* top_width + w;
const int bottom_offset = ((n * channels + c) * bottom_height + h_in)
* bottom_width + w_in;
buffer_data[index] = top_diff[top_offset] * bottom_data[bottom_offset];
} else {
buffer_data[index] = 0;
}
}
}
template <typename Dtype>
__global__ void ConvolutionDepthwiseBottomBackward(const int nthreads,
const Dtype* const top_diff, const Dtype* const weight_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / channels / bottom_height / bottom_width;
const int c = (index / bottom_height / bottom_width) % channels;
const int h = (index / bottom_width) % bottom_height;
const int w = index % bottom_width;
const Dtype* weight = weight_data + c * kernel_h * kernel_w;
Dtype value = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h_out_s = h + pad_h - kh * dilation_h;
const int w_out_s = w + pad_w - kw * dilation_w;
if (((h_out_s % stride_h) == 0) && ((w_out_s % stride_w) == 0)) {
const int h_out = h_out_s / stride_h;
const int w_out = w_out_s / stride_w;
if ((h_out >= 0) && (h_out < top_height)
&& (w_out >= 0) && (w_out < top_width)) {
const int offset = ((n * channels + c) * top_height + h_out)
* top_width + w_out;
value += (*weight) * top_diff[offset];
}
}
++weight;
}
}
bottom_diff[index] += value;
}
}
template <typename Dtype>
__global__ void ConvolutionDepthwiseBiasBackward(const int nthreads,
const Dtype* const top_diff, const int num, const int channels,
const int top_height, const int top_width, Dtype* const buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = index / num / top_height / top_width;
const int n = (index / top_height / top_width) % num;
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const int offset = ((n * channels + c) * top_height + h) * top_width + w;
buffer_data[index] = top_diff[offset];
}
}
template <typename Dtype>
void ConvolutionDepthwiseLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const int bottom_count = bottom[0]->count();
const int num = top[0]->num();
const int channels = top[0]->channels();
const int top_height = top[0]->height();
const int top_width = top[0]->width();
const int bottom_height = bottom[0]->height();
const int bottom_width = bottom[0]->width();
const int length = num * top_height * top_width;
caffe_gpu_set(bottom_count, Dtype(0), bottom[0]->mutable_gpu_diff());
if (this->layer_param_.convolution_param().bias_term()
&& this->param_propagate_down_[1]) {
const int bias_buffer_count = bias_buffer_.count();
Dtype* bias_buffer_mutable_data = bias_buffer_.mutable_gpu_data();
ConvolutionDepthwiseBiasBackward<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(bias_buffer_count), CAFFE_CUDA_NUM_THREADS>>>(
bias_buffer_count, top_diff, num, channels,
top_height, top_width, bias_buffer_mutable_data);
const int bias_count = this->blobs_[1]->count();
const Dtype* bias_buffer_data = bias_buffer_.gpu_data();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
const Dtype* bias_multiplier_data = bias_multiplier_.gpu_data();
caffe_gpu_gemv(CblasNoTrans, bias_count, length, Dtype(1),
bias_buffer_data, bias_multiplier_data, Dtype(1), bias_diff);
}
if (this->param_propagate_down_[0]) {
const int weight_buffer_count = weight_buffer_.count();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* weight_buffer_mutable_data = weight_buffer_.mutable_gpu_data();
ConvolutionDepthwiseWeightBackward<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(weight_buffer_count), CAFFE_CUDA_NUM_THREADS>>>(
weight_buffer_count, top_diff, bottom_data, num, channels,
top_height, top_width, bottom_height, bottom_width,
kernel_h_, kernel_w_, stride_h_, stride_w_,
pad_h_, pad_w_, dilation_h_, dilation_w_, weight_buffer_mutable_data);
const int weight_count = this->blobs_[0]->count();
const Dtype* weight_buffer_data = weight_buffer_.gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* weight_multiplier_data = weight_multiplier_.gpu_data();
caffe_gpu_gemv(CblasNoTrans, weight_count, length, Dtype(1),
weight_buffer_data, weight_multiplier_data, Dtype(1), weight_diff);
}
if (propagate_down[0]) {
const Dtype* weight_data = this->blobs_[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
ConvolutionDepthwiseBottomBackward<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>(
bottom_count, top_diff, weight_data, num, channels,
top_height, top_width, bottom_height, bottom_width,
kernel_h_, kernel_w_, stride_h_, stride_w_,
pad_h_, pad_w_, dilation_h_, dilation_w_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionDepthwiseLayer);
} // namespace caffe
|
fd842ff8f29748464cecfa6c82ea7e9723840574.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h> // for strcat
#define N 30
//typedef long long int ll;
// Aligns `key` against the `index`-th fixed-length query packed in `s` (one query per
// thread): fills a Needleman-Wunsch-style score matrix with gap penalty GP and match
// reward MR, traces back an alignment into r1/r2, prints it and stores its score.
__global__ void align(char *key , char *s , int *scores , int n , int num)
{
int GP = -1 , MR = 1;
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < num)
{
int i , j , k , tmp;
int nm[N + 1][N + 1];
char r1[2*N+2] , r2[2*N+2];
for (i = 0; i <= n; i++)
{
nm[0][i] = GP * i;
nm[i][0] = GP * i;
}
for (i = 1; i <= n; i++)
{
for (j = 1; j <= n; j++)
{
if(key[i-1] == s[n*index + j-1])
nm[i][j] = nm[i-1][j-1] + MR;
else
{
if(nm[i-1][j] <= nm[i][j-1])
nm[i][j] = nm[i][j-1] + GP;
else
nm[i][j] = nm[i-1][j] + GP;
}
}
}
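/* [Editorial worked example, illustrative only] With GP = -1, MR = +1, key = "AC" and a
   query "AG" (n = 2): the border row/column hold 0, -1, -2; nm[1][1] = nm[0][0] + 1 = 1
   (A matches A); nm[1][2] = max(nm[0][2], nm[1][1]) - 1 = 0; nm[2][1] = 0; and
   nm[2][2] = max(nm[1][2], nm[2][1]) - 1 = -1, i.e. one match plus one gap/mismatch
   under this simplified scoring. */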
/* for (i = 0; i <= n; i++)
{
for (j = 0; j <= n; j++)
{
printf("%d " , nm[i][j]);
}
printf("\n");
} */
i = n , j = n , k = 0;
//for(int cnt = 1; cnt <= 30 && i > 0 && j > 0; cnt++)
while(i > 0 && j > 0)
{
//if(index == 3)printf("**%d %d % d\n" , cnt , i , j);
tmp = nm[i-1][j-1] > nm[i][j-1] ? (nm[i-1][j-1] > nm[i-1][j] ? nm[i-1][j-1] : nm[i-1][j]) : (nm[i][j-1] > nm[i-1][j] ? nm[i][j-1] : nm[i-1][j]);
if(tmp == nm[i-1][j-1] || key[i-1] == s[n*index + j-1])
{
r1[k] = key[i-1];
r2[k] = s[n*index + j-1];
i--;
j--;
}
else if(tmp == nm[i][j-1])
{
r1[k] = '-';
r2[k] = s[n*index + j-1];
j--;
}
else if(tmp == nm[i-1][j])
{
r1[k] = key[i-1];
r2[k] = '-';
i--;
}
k++;
}
for(i = 0; i < k/2; i++)
{
{ char t = r1[i]; r1[i] = r1[k-i-1]; r1[k-i-1] = t; } // plain swap avoids the unsequenced read/write of the original one-liner
{ char t = r2[i]; r2[i] = r2[k-i-1]; r2[k-i-1] = t; }
}
r1[k] = '\0';
r2[k] = '\0';
printf("\nAlignment #%d :\n-------------------\nKey:\n%s\nQuery:\n%s\n" , index+1 , r1 , r2);
int score = 0;
for(i = 0; i < k; i++)
{
if(r1[i] == '-' || r2[i] == '-')
score += GP;
else if(r1[i] == r2[i])
score += MR;
else
score += GP;
}
scores[index] = score;
}
}
int main(int argc, char** argv)
{
int size = sizeof(int);
int THREADS = 1024;
freopen(argv[1] , "r", stdin);
freopen(argv[2] , "w", stdout);
int *host_scores , *scores;
int i , num , n;
//printf("Enter size:");
scanf("%d" , &n);
//printf("Enter number of queries:");
scanf("%d" , &num);
int m = num < THREADS ? num : THREADS;
char *host_key = (char *)malloc(n + 1); // +1 for the terminating '\0' written by scanf
char *tmp = (char *)malloc(n + 1);
char *host_q = (char *)malloc(num * n + 2);
char *key , *q;
//printf("Enter key:");
scanf("%s" , host_key);
//printf("Enter the queries:");
for(i = 0; i <num; i++)
{
if(i == 0)
scanf("%s" , host_q);
else
{
scanf("%s" , tmp);
strcat(host_q , tmp);
}
}
host_scores = (int *)malloc(size * num);
hipMalloc((void **)&scores , num * size);
hipMalloc((void **)&key , n);
hipMalloc((void **)&q , n * num + 2);
hipMemcpy(key , host_key , n , hipMemcpyHostToDevice);
hipMemcpy(q , host_q , n * num + 2 , hipMemcpyHostToDevice);
hipLaunchKernelGGL(( align) , dim3((num + m - 1) / m) , dim3(m), 0, 0, key , q , scores , n , num); // grid sized by the number of queries, not the sequence length
hipMemcpy(host_scores , scores , size * num , hipMemcpyDeviceToHost);
printf("\n\nAlignment Scores:\n----------------------------\n");
for(i = 0; i < num; i++)
printf("Query #%d : %d\n" , i+1 , host_scores[i]);
hipFree(key);
hipFree(q);
hipFree(scores);
return 0;
} | fd842ff8f29748464cecfa6c82ea7e9723840574.cu |
#include <stdio.h>
#include <stdlib.h>
#include <string.h> // for strcat
#define N 30
//typedef long long int ll;
__global__ void align(char *key , char *s , int *scores , int n , int num)
{
int GP = -1 , MR = 1;
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < num)
{
int i , j , k , tmp;
int nm[N + 1][N + 1];
char r1[2*N+2] , r2[2*N+2];
for (i = 0; i <= n; i++)
{
nm[0][i] = GP * i;
nm[i][0] = GP * i;
}
for (i = 1; i <= n; i++)
{
for (j = 1; j <= n; j++)
{
if(key[i-1] == s[n*index + j-1])
nm[i][j] = nm[i-1][j-1] + MR;
else
{
if(nm[i-1][j] <= nm[i][j-1])
nm[i][j] = nm[i][j-1] + GP;
else
nm[i][j] = nm[i-1][j] + GP;
}
}
}
/* for (i = 0; i <= n; i++)
{
for (j = 0; j <= n; j++)
{
printf("%d " , nm[i][j]);
}
printf("\n");
} */
i = n , j = n , k = 0;
//for(int cnt = 1; cnt <= 30 && i > 0 && j > 0; cnt++)
while(i > 0 && j > 0)
{
//if(index == 3)printf("**%d %d % d\n" , cnt , i , j);
tmp = nm[i-1][j-1] > nm[i][j-1] ? (nm[i-1][j-1] > nm[i-1][j] ? nm[i-1][j-1] : nm[i-1][j]) : (nm[i][j-1] > nm[i-1][j] ? nm[i][j-1] : nm[i-1][j]);
if(tmp == nm[i-1][j-1] || key[i-1] == s[n*index + j-1])
{
r1[k] = key[i-1];
r2[k] = s[n*index + j-1];
i--;
j--;
}
else if(tmp == nm[i][j-1])
{
r1[k] = '-';
r2[k] = s[n*index + j-1];
j--;
}
else if(tmp == nm[i-1][j])
{
r1[k] = key[i-1];
r2[k] = '-';
i--;
}
k++;
}
for(i = 0; i < k/2; i++)
{
{ char t = r1[i]; r1[i] = r1[k-i-1]; r1[k-i-1] = t; } // plain swap avoids the unsequenced read/write of the original one-liner
{ char t = r2[i]; r2[i] = r2[k-i-1]; r2[k-i-1] = t; }
}
r1[k] = '\0';
r2[k] = '\0';
printf("\nAlignment #%d :\n-------------------\nKey:\n%s\nQuery:\n%s\n" , index+1 , r1 , r2);
int score = 0;
for(i = 0; i < k; i++)
{
if(r1[i] == '-' || r2[i] == '-')
score += GP;
else if(r1[i] == r2[i])
score += MR;
else
score += GP;
}
scores[index] = score;
}
}
int main(int argc, char** argv)
{
int size = sizeof(int);
int THREADS = 1024;
freopen(argv[1] , "r", stdin);
freopen(argv[2] , "w", stdout);
int *host_scores , *scores;
int i , num , n;
//printf("Enter size:");
scanf("%d" , &n);
//printf("Enter number of queries:");
scanf("%d" , &num);
int m = num < THREADS ? num : THREADS;
char *host_key = (char *)malloc(n + 1); // +1 for the terminating '\0' written by scanf
char *tmp = (char *)malloc(n + 1);
char *host_q = (char *)malloc(num * n + 2);
char *key , *q;
//printf("Enter key:");
scanf("%s" , host_key);
//printf("Enter the queries:");
for(i = 0; i <num; i++)
{
if(i == 0)
scanf("%s" , host_q);
else
{
scanf("%s" , tmp);
strcat(host_q , tmp);
}
}
host_scores = (int *)malloc(size * num);
cudaMalloc((void **)&scores , num * size);
cudaMalloc((void **)&key , n);
cudaMalloc((void **)&q , n * num + 2);
cudaMemcpy(key , host_key , n , cudaMemcpyHostToDevice);
cudaMemcpy(q , host_q , n * num + 2 , cudaMemcpyHostToDevice);
align <<<(num + m - 1) / m , m>>> (key , q , scores , n , num); // grid sized by the number of queries, not the sequence length
cudaMemcpy(host_scores , scores , size * num , cudaMemcpyDeviceToHost);
printf("\n\nAlignment Scores:\n----------------------------\n");
for(i = 0; i < num; i++)
printf("Query #%d : %d\n" , i+1 , host_scores[i]);
cudaFree(key);
cudaFree(q);
cudaFree(scores);
return 0;
} |
0b04e71399f9010078e227cb4203ef3879dff608.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <limits>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/syncedmem.hpp"
using std::max;
namespace caffe {
template <typename Dtype>
void DropoutLayer<Dtype>::SetUp(const vector<Blob<Dtype>* >& bottom, vector<Blob<Dtype>* >* top){
NeuronLayer<Dtype>::SetUp(bottom, top);
rand_vec_.reset(new SyncedMemory(bottom[0]->count() * sizeof(int)));
threshold_ = this->layer_param_.dropout_ratio();
DCHECK(threshold_ > 0. );
DCHECK(threshold_ < 1.);
scale_ = 1 / (1. - threshold_);
uint_thres_ = (unsigned int)(UINT_MAX * threshold_);
}
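// [Editorial sketch, not part of the original layer] Inverted dropout keeps each unit with
// probability (1 - dropout_ratio) and rescales survivors by 1 / (1 - dropout_ratio), so the
// expected output equals the input at training time. The helper below (its name is invented
// here purely for illustration) restates the arithmetic SetUp uses for scale_:
inline float example_inverted_dropout_scale(float dropout_ratio) {
  return 1.f / (1.f - dropout_ratio); // e.g. dropout_ratio = 0.5 -> scale = 2
}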
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype> *> &bottom,
vector<Blob<Dtype> *> *top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
const int count = bottom[0]->count();
if(Caffe::phase() == Caffe::TRAIN){
int* mask = (int*)(rand_vec_->mutable_cpu_data());
viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, Caffe::vsl_stream(), count, mask, 1. - threshold_);
for (int i = 0; i<count; ++i){
top_data[i] = bottom_data[i] * mask[i] * scale_;
}
}
else{
memcpy(top_data, bottom_data, bottom[0]->count() * sizeof(Dtype));
}
}
template <typename Dtype>
Dtype DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype> *> &top, const bool propagate_down,
vector<Blob<Dtype> *> *bottom) {
CHECK(Caffe::phase() == Caffe::TRAIN);
if(propagate_down){
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
const int* mask = (int*)(rand_vec_->cpu_data());
const int count = (*bottom)[0]->count();
for(int i=0; i<count; ++i){
bottom_diff[i] = top_diff[i] * mask[i] * scale_;
}
}
return Dtype(0);
}
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
// Create random numbers
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(),
(unsigned int*)(rand_vec_->mutable_gpu_data()), count));
// set thresholds
hipLaunchKernelGGL(( DropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, (unsigned int*)(rand_vec_->gpu_data()), uint_thres_, scale_,
top_data);
CUDA_POST_KERNEL_CHECK;
} else {
CUDA_CHECK(hipMemcpy(top_data, bottom_data,
count * sizeof(Dtype), hipMemcpyDeviceToDevice));
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
if (mask[index] > threshold) {
out_diff[index] = in_diff[index] * scale;
}
}
}
template <typename Dtype>
Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
CHECK(Caffe::phase() == Caffe::TRAIN);
if (propagate_down) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const unsigned int* mask = (unsigned int*)(rand_vec_->gpu_data());
const int count = (*bottom)[0]->count();
hipLaunchKernelGGL(( DropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, uint_thres_, scale_,
bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
return Dtype(0);
}
INSTANTIATE_CLASS(DropoutLayer);
} // namespace caffe | 0b04e71399f9010078e227cb4203ef3879dff608.cu | #include <algorithm>
#include <limits>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/syncedmem.hpp"
using std::max;
namespace caffe {
template <typename Dtype>
void DropoutLayer<Dtype>::SetUp(const vector<Blob<Dtype>* >& bottom, vector<Blob<Dtype>* >* top){
NeuronLayer<Dtype>::SetUp(bottom, top);
rand_vec_.reset(new SyncedMemory(bottom[0]->count() * sizeof(int)));
threshold_ = this->layer_param_.dropout_ratio();
DCHECK(threshold_ > 0. );
DCHECK(threshold_ < 1.);
scale_ = 1 / (1. - threshold_);
uint_thres_ = (unsigned int)(UINT_MAX * threshold_);
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype> *> &bottom,
vector<Blob<Dtype> *> *top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
const int count = bottom[0]->count();
if(Caffe::phase() == Caffe::TRAIN){
int* mask = (int*)(rand_vec_->mutable_cpu_data());
viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, Caffe::vsl_stream(), count, mask, 1. - threshold_);
for (int i = 0; i<count; ++i){
top_data[i] = bottom_data[i] * mask[i] * scale_;
}
}
else{
memcpy(top_data, bottom_data, bottom[0]->count() * sizeof(Dtype));
}
}
template <typename Dtype>
Dtype DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype> *> &top, const bool propagate_down,
vector<Blob<Dtype> *> *bottom) {
CHECK(Caffe::phase() == Caffe::TRAIN);
if(propagate_down){
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
const int* mask = (int*)(rand_vec_->cpu_data());
const int count = (*bottom)[0]->count();
for(int i=0; i<count; ++i){
bottom_diff[i] = top_diff[i] * mask[i] * scale_;
}
}
return Dtype(0);
}
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
// Create random numbers
CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
(unsigned int*)(rand_vec_->mutable_gpu_data()), count));
// set thresholds
DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, (unsigned int*)(rand_vec_->gpu_data()), uint_thres_, scale_,
top_data);
CUDA_POST_KERNEL_CHECK;
} else {
CUDA_CHECK(cudaMemcpy(top_data, bottom_data,
count * sizeof(Dtype), cudaMemcpyDeviceToDevice));
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
if (mask[index] > threshold) {
out_diff[index] = in_diff[index] * scale;
}
}
}
template <typename Dtype>
Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
CHECK(Caffe::phase() == Caffe::TRAIN);
if (propagate_down) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const unsigned int* mask = (unsigned int*)(rand_vec_->gpu_data());
const int count = (*bottom)[0]->count();
DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_,
bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
return Dtype(0);
}
INSTANTIATE_CLASS(DropoutLayer);
} // namespace caffe |
ab31f1690ead0f0bf0e6e44c1fc25bb8dee946a8.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <hipcub/hipcub.hpp> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// The dynamic shape plugin requires TensorRT 6.0 or later.
#if IS_TRT_VERSION_GE(6000)
template <typename T>
EmbEltwiseLayernormPluginDynamicImpl<
T>::~EmbEltwiseLayernormPluginDynamicImpl() {}
inline half fp32tofp16(float x) { return static_cast<half>(x); }
template <typename T>
void EmbEltwiseLayernormPluginDynamicImpl<T>::shareGPUData(
const EmbEltwiseLayernormPluginDynamicImplBase *anthor) {
auto *ptr =
dynamic_cast<const EmbEltwiseLayernormPluginDynamicImpl<T> *>(anthor);
if (!ptr->is_initialized_) {
return;
}
embs_gpu_ = ptr->embs_gpu_;
scale_gpu_ = ptr->scale_gpu_;
bias_gpu_ = ptr->bias_gpu_;
int input_num = embs_.size();
in_ptr_tensor_.Resize({input_num});
emb_ptr_tensor_.ShareDataWith(ptr->emb_ptr_tensor_);
}
template <typename T>
int EmbEltwiseLayernormPluginDynamicImpl<T>::initialize() {
if (is_initialized_) {
return 0;
}
embs_gpu_.resize(embs_.size());
for (int i = 0; i < embs_.size(); i++) {
if (embs_[i]) {
T *host_ptr;
auto size = emb_sizes_[i];
if (std::is_same<T, half>::value) {
host_ptr = new T[size];
std::transform(embs_[i], (embs_[i] + size), host_ptr, fp32tofp16);
} else {
host_ptr = reinterpret_cast<T *>(embs_[i]);
}
hipMalloc(&embs_gpu_[i], sizeof(T) * size);
hipMemcpy(
embs_gpu_[i], host_ptr, size * sizeof(T), hipMemcpyHostToDevice);
if (std::is_same<T, half>::value) {
delete[] host_ptr;
}
}
}
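// [Editorial note, illustrative only] For the half instantiation, the loop above first
// converts each fp32 embedding table on the host (std::transform through fp32tofp16) into a
// temporary buffer and uploads only the converted copy, so each table occupies
// sizeof(half) * emb_sizes_[i] bytes on the device; the float instantiation reuses the
// original pointer and skips the temporary.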
if (bias_) {
hipMalloc(&bias_gpu_, sizeof(float) * bias_size_);
hipMemcpy(
bias_gpu_, bias_, bias_size_ * sizeof(float), hipMemcpyHostToDevice);
}
if (scale_) {
hipMalloc(&scale_gpu_, sizeof(float) * scale_size_);
hipMemcpy(scale_gpu_,
scale_,
scale_size_ * sizeof(float),
hipMemcpyHostToDevice);
}
int input_num = embs_.size();
in_ptr_tensor_.Resize({input_num});
emb_ptr_tensor_.Resize({input_num});
hipGetDevice(&device_id_);
auto emb_ptr_gpu_d =
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
hipMemcpy(emb_ptr_gpu_d,
embs_gpu_.data(),
sizeof(uintptr_t) * input_num,
hipMemcpyHostToDevice);
is_initialized_ = true;
return 0;
}
template <typename T>
void EmbEltwiseLayernormPluginDynamicImpl<T>::terminate() {
for (int i = 0; i < embs_gpu_.size(); ++i) {
if (embs_gpu_[i]) {
hipFree(embs_gpu_[i]);
embs_gpu_[i] = nullptr;
}
}
if (bias_gpu_) {
hipFree(bias_gpu_);
bias_gpu_ = nullptr;
}
if (scale_gpu_) {
hipFree(scale_gpu_);
scale_gpu_ = nullptr;
}
}
template <typename T>
int EmbEltwiseLayernormPluginDynamicImpl<T>::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto id_dims = input_desc[0].dims;
int batch = id_dims.d[0];
int seq_len = id_dims.d[1];
int input_num = embs_.size();
hipGetDevice(&device_id_);
auto in_ptr_gpu_d =
in_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
auto emb_ptr_gpu_d =
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
hipMemcpyAsync(in_ptr_gpu_d,
reinterpret_cast<const void *>(inputs),
sizeof(uintptr_t) * input_num,
hipMemcpyHostToDevice,
stream);
auto out_type = output_desc[0].type;
if (std::is_same<T, float>::value) {
PADDLE_ENFORCE_EQ(
out_type == nvinfer1::DataType::kFLOAT,
true,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only support fp32 input."));
} else if (std::is_same<T, half>::value) {
PADDLE_ENFORCE_EQ(
out_type == nvinfer1::DataType::kHALF,
true,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only support fp16 input."));
} else {
PADDLE_THROW(platform::errors::Fatal(
"Unsupport data type, the out type of EmbEltwiseLayernorm should be "
"float or half."));
}
auto *output_d = reinterpret_cast<T *>(outputs[0]);
operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func;
emb_eltwise_layernorm_func(batch,
seq_len,
hidden_size_,
in_ptr_gpu_d,
scale_gpu_,
bias_gpu_,
emb_ptr_gpu_d,
output_d,
eps_,
input_num,
stream);
return hipGetLastError() != hipSuccess;
}
template class EmbEltwiseLayernormPluginDynamicImpl<float>;
#ifdef TRT_PLUGIN_FP16_AVALIABLE
template class EmbEltwiseLayernormPluginDynamicImpl<half>;
#endif
int EmbEltwiseLayernormPluginDynamic::initialize() TRT_NOEXCEPT {
impl_->initialize();
return 0;
}
void EmbEltwiseLayernormPluginDynamic::terminate() TRT_NOEXCEPT {
impl_->terminate();
}
nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputs,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { // NOLINT
PADDLE_ENFORCE_EQ(output_index,
0,
platform::errors::InvalidArgument(
"There is only one output of the EmbEltwiseLayernorm, "
"so the index should be zero,"
"but it's (%d)",
output_index));
nvinfer1::DimsExprs ret;
ret.nbDims = 3;
ret.d[0] = inputs[0].d[0];
ret.d[1] = inputs[0].d[1];
ret.d[2] = expr_builder.constant(hidden_size_);
return ret;
}
bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_EQ(nb_outputs,
1,
platform::errors::InvalidArgument(
"The EmbEltwiseLayerNorm's output should be one"
"but it's (%d) outputs.",
nb_outputs));
PADDLE_ENFORCE_EQ(nb_outputs,
1,
platform::errors::InvalidArgument(
"The EmbEltwiseLayerNorm's output should be one"
"but it's (%d) outputs.",
nb_outputs));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
int all_nums = nb_inputs + nb_outputs;
const nvinfer1::PluginTensorDesc &desc = in_out[pos];
if (desc.format != nvinfer1::TensorFormat::kLINEAR) {
return false;
}
if (pos == 0) {
return desc.type == nvinfer1::DataType::kINT32;
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
if (pos < all_nums - 1) {
return desc.type == nvinfer1::DataType::kINT32 &&
desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1];
}
if (pos == all_nums - 1) {
if (with_fp16_ == false) {
return desc.type == nvinfer1::DataType::kFLOAT;
} else {
return desc.type == nvinfer1::DataType::kHALF;
}
}
return false;
}
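// [Editorial note, illustrative only] In the check above, positions 0 .. nb_inputs - 1 are
// the int32 id tensors (every position after the first must also match the first two dims
// of its predecessor), and the final position is the single output, required to be fp32
// when with_fp16_ is false and fp16 otherwise; only kLINEAR layouts are accepted.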
nvinfer1::DataType EmbEltwiseLayernormPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(
index,
0,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
if (with_fp16_)
return nvinfer1::DataType::kHALF;
else
return nvinfer1::DataType::kFLOAT;
}
int EmbEltwiseLayernormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
impl_->enqueue(input_desc, output_desc, inputs, outputs, workspace, stream);
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| ab31f1690ead0f0bf0e6e44c1fc25bb8dee946a8.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <cub/cub.cuh> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// The dynamic shape plugin requires TensorRT 6.0 or later.
#if IS_TRT_VERSION_GE(6000)
template <typename T>
EmbEltwiseLayernormPluginDynamicImpl<
T>::~EmbEltwiseLayernormPluginDynamicImpl() {}
inline half fp32tofp16(float x) { return static_cast<half>(x); }
template <typename T>
void EmbEltwiseLayernormPluginDynamicImpl<T>::shareGPUData(
const EmbEltwiseLayernormPluginDynamicImplBase *anthor) {
auto *ptr =
dynamic_cast<const EmbEltwiseLayernormPluginDynamicImpl<T> *>(anthor);
if (!ptr->is_initialized_) {
return;
}
embs_gpu_ = ptr->embs_gpu_;
scale_gpu_ = ptr->scale_gpu_;
bias_gpu_ = ptr->bias_gpu_;
int input_num = embs_.size();
in_ptr_tensor_.Resize({input_num});
emb_ptr_tensor_.ShareDataWith(ptr->emb_ptr_tensor_);
}
template <typename T>
int EmbEltwiseLayernormPluginDynamicImpl<T>::initialize() {
if (is_initialized_) {
return 0;
}
embs_gpu_.resize(embs_.size());
for (int i = 0; i < embs_.size(); i++) {
if (embs_[i]) {
T *host_ptr;
auto size = emb_sizes_[i];
if (std::is_same<T, half>::value) {
host_ptr = new T[size];
std::transform(embs_[i], (embs_[i] + size), host_ptr, fp32tofp16);
} else {
host_ptr = reinterpret_cast<T *>(embs_[i]);
}
cudaMalloc(&embs_gpu_[i], sizeof(T) * size);
cudaMemcpy(
embs_gpu_[i], host_ptr, size * sizeof(T), cudaMemcpyHostToDevice);
if (std::is_same<T, half>::value) {
delete[] host_ptr;
}
}
}
if (bias_) {
cudaMalloc(&bias_gpu_, sizeof(float) * bias_size_);
cudaMemcpy(
bias_gpu_, bias_, bias_size_ * sizeof(float), cudaMemcpyHostToDevice);
}
if (scale_) {
cudaMalloc(&scale_gpu_, sizeof(float) * scale_size_);
cudaMemcpy(scale_gpu_,
scale_,
scale_size_ * sizeof(float),
cudaMemcpyHostToDevice);
}
int input_num = embs_.size();
in_ptr_tensor_.Resize({input_num});
emb_ptr_tensor_.Resize({input_num});
cudaGetDevice(&device_id_);
auto emb_ptr_gpu_d =
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
cudaMemcpy(emb_ptr_gpu_d,
embs_gpu_.data(),
sizeof(uintptr_t) * input_num,
cudaMemcpyHostToDevice);
is_initialized_ = true;
return 0;
}
template <typename T>
void EmbEltwiseLayernormPluginDynamicImpl<T>::terminate() {
for (int i = 0; i < embs_gpu_.size(); ++i) {
if (embs_gpu_[i]) {
cudaFree(embs_gpu_[i]);
embs_gpu_[i] = nullptr;
}
}
if (bias_gpu_) {
cudaFree(bias_gpu_);
bias_gpu_ = nullptr;
}
if (scale_gpu_) {
cudaFree(scale_gpu_);
scale_gpu_ = nullptr;
}
}
template <typename T>
int EmbEltwiseLayernormPluginDynamicImpl<T>::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto id_dims = input_desc[0].dims;
int batch = id_dims.d[0];
int seq_len = id_dims.d[1];
int input_num = embs_.size();
cudaGetDevice(&device_id_);
auto in_ptr_gpu_d =
in_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
auto emb_ptr_gpu_d =
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_));
cudaMemcpyAsync(in_ptr_gpu_d,
reinterpret_cast<const void *>(inputs),
sizeof(uintptr_t) * input_num,
cudaMemcpyHostToDevice,
stream);
auto out_type = output_desc[0].type;
if (std::is_same<T, float>::value) {
PADDLE_ENFORCE_EQ(
out_type == nvinfer1::DataType::kFLOAT,
true,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only support fp32 input."));
} else if (std::is_same<T, half>::value) {
PADDLE_ENFORCE_EQ(
out_type == nvinfer1::DataType::kHALF,
true,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only support fp16 input."));
} else {
PADDLE_THROW(platform::errors::Fatal(
"Unsupport data type, the out type of EmbEltwiseLayernorm should be "
"float or half."));
}
auto *output_d = reinterpret_cast<T *>(outputs[0]);
operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func;
emb_eltwise_layernorm_func(batch,
seq_len,
hidden_size_,
in_ptr_gpu_d,
scale_gpu_,
bias_gpu_,
emb_ptr_gpu_d,
output_d,
eps_,
input_num,
stream);
return cudaGetLastError() != cudaSuccess;
}
template class EmbEltwiseLayernormPluginDynamicImpl<float>;
#ifdef TRT_PLUGIN_FP16_AVALIABLE
template class EmbEltwiseLayernormPluginDynamicImpl<half>;
#endif
int EmbEltwiseLayernormPluginDynamic::initialize() TRT_NOEXCEPT {
impl_->initialize();
return 0;
}
void EmbEltwiseLayernormPluginDynamic::terminate() TRT_NOEXCEPT {
impl_->terminate();
}
nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputs,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { // NOLINT
PADDLE_ENFORCE_EQ(output_index,
0,
platform::errors::InvalidArgument(
"There is only one output of the EmbEltwiseLayernorm, "
"so the index should be zero,"
"but it's (%d)",
output_index));
nvinfer1::DimsExprs ret;
ret.nbDims = 3;
ret.d[0] = inputs[0].d[0];
ret.d[1] = inputs[0].d[1];
ret.d[2] = expr_builder.constant(hidden_size_);
return ret;
}
bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_EQ(nb_outputs,
1,
platform::errors::InvalidArgument(
"The EmbEltwiseLayerNorm's output should be one"
"but it's (%d) outputs.",
nb_outputs));
PADDLE_ENFORCE_EQ(nb_outputs,
1,
platform::errors::InvalidArgument(
"The EmbEltwiseLayerNorm's output should be one"
"but it's (%d) outputs.",
nb_outputs));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
int all_nums = nb_inputs + nb_outputs;
const nvinfer1::PluginTensorDesc &desc = in_out[pos];
if (desc.format != nvinfer1::TensorFormat::kLINEAR) {
return false;
}
if (pos == 0) {
return desc.type == nvinfer1::DataType::kINT32;
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
if (pos < all_nums - 1) {
return desc.type == nvinfer1::DataType::kINT32 &&
desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1];
}
if (pos == all_nums - 1) {
if (with_fp16_ == false) {
return desc.type == nvinfer1::DataType::kFLOAT;
} else {
return desc.type == nvinfer1::DataType::kHALF;
}
}
return false;
}
nvinfer1::DataType EmbEltwiseLayernormPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(
index,
0,
platform::errors::InvalidArgument(
"The EmbEltwiseLayernorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
if (with_fp16_)
return nvinfer1::DataType::kHALF;
else
return nvinfer1::DataType::kFLOAT;
}
int EmbEltwiseLayernormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
impl_->enqueue(input_desc, output_desc, inputs, outputs, workspace, stream);
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
4d0946dafc6c8aebede9c44041cda57eebf1b0f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/detail/FunctionTraits.h>
#include <cmath>
#include <limits>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/range_native.h>
#endif
#define GPU_LAMBDA __device__ __host__
namespace {
#if defined(USE_ROCM)
constexpr int num_threads() {
return 128;
}
#else
constexpr int num_threads() {
return C10_WARP_SIZE * 2;
}
#endif
constexpr int thread_work_size = 1;
constexpr int block_work_size = thread_work_size * num_threads();
template<typename index_t, typename func_t>
C10_LAUNCH_BOUNDS_1(num_threads())
__global__ void elementwise_kernel_with_index(index_t N, func_t f, typename function_traits<func_t>::result_type *data) {
#pragma unroll
for (int i = 0; i < thread_work_size; i++) {
index_t idx = block_work_size * blockIdx.x + num_threads() * i + threadIdx.x;
if (idx < N) {
data[idx] = f(idx);
}
}
}
template<typename func_t>
void gpu_kernel_with_index(at::Tensor &output, func_t f) {
int64_t N = output.numel();
if (N == 0) {
return;
}
int64_t grid = (N + block_work_size - 1) / block_work_size;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
using scalar_t = typename function_traits<func_t>::result_type;
if (N <= std::numeric_limits<int>::max()) {
hipLaunchKernelGGL(( elementwise_kernel_with_index<int>), dim3(grid), dim3(num_threads()), 0, stream, N, f, output.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( elementwise_kernel_with_index<int64_t>), dim3(grid), dim3(num_threads()), 0, stream, N, f, output.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
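// [Editorial note, illustrative only] The launcher above picks a 32-bit index type whenever
// the element count fits in int and falls back to int64_t otherwise; e.g. a tensor of 10^6
// elements uses the cheaper int indexing while one of 2^31 elements takes the int64_t branch.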
} // namespace
namespace at {
namespace native {
Tensor& linspace_cuda_out(const Scalar& start, const Scalar& end, int64_t steps, Tensor& result) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
// Cast `end` and `start` to `float`, since range can be larger than scalar_t for integral types
float step = (static_cast<float>(scalar_end) - static_cast<float>(scalar_start)) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
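// [Editorial note, illustrative only] The `halfway` split above fills the first half of the
// points forward from `start` and the second half backward from `end`, so accumulated
// rounding error cannot push the final element away from `end`. E.g. steps = 5, start = 0,
// end = 1 gives step = 0.25; indices 0 and 1 evaluate as start + step * ind (0, 0.25) while
// indices 2, 3 and 4 evaluate as end - step * (steps - ind - 1) (0.5, 0.75, 1.0), hitting
// `end` exactly.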
Tensor& logspace_cuda_out(const Scalar& start, const Scalar& end, int64_t steps, double base, Tensor& result) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
if (isComplexType(r.scalar_type())){
r.fill_(::pow(base, start.to<c10::complex<double>>()));
} else {
r.fill_(::pow(base, start.to<double>()));
}
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "logspace_cuda", [&]() {
float scalar_base = static_cast<float>(base); // Use float to avoid promotion to double
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
float step = static_cast<float>(scalar_end - scalar_start) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return ::pow(scalar_base, scalar_start + step * ind);
}
return ::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return ::pow(scalar_base, scalar_start + step * ind);
}
return ::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& range_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
Tensor& arange_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
// we don't want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
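// [Editorial note, illustrative only] Example of that corner case: for start = 2^62,
// end = 2^62 + 10 and step = 1, converting each endpoint to double first rounds both to
// 2^62 (the double ulp at that magnitude is 1024), giving a difference of 0, whereas
// subtracting in int64_t first yields exactly 10; hence the separate int64_t branch below.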
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = ::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = ::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
}} // namespace at::native
| 4d0946dafc6c8aebede9c44041cda57eebf1b0f9.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/detail/FunctionTraits.h>
#include <cmath>
#include <limits>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/arange_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/linspace_native.h>
#include <ATen/ops/logspace_native.h>
#include <ATen/ops/range_native.h>
#endif
#define GPU_LAMBDA __device__ __host__
namespace {
#if defined(USE_ROCM)
constexpr int num_threads() {
return 128;
}
#else
constexpr int num_threads() {
return C10_WARP_SIZE * 2;
}
#endif
constexpr int thread_work_size = 1;
constexpr int block_work_size = thread_work_size * num_threads();
template<typename index_t, typename func_t>
C10_LAUNCH_BOUNDS_1(num_threads())
__global__ void elementwise_kernel_with_index(index_t N, func_t f, typename function_traits<func_t>::result_type *data) {
#pragma unroll
for (int i = 0; i < thread_work_size; i++) {
index_t idx = block_work_size * blockIdx.x + num_threads() * i + threadIdx.x;
if (idx < N) {
data[idx] = f(idx);
}
}
}
template<typename func_t>
void gpu_kernel_with_index(at::Tensor &output, func_t f) {
int64_t N = output.numel();
if (N == 0) {
return;
}
int64_t grid = (N + block_work_size - 1) / block_work_size;
auto stream = at::cuda::getCurrentCUDAStream();
using scalar_t = typename function_traits<func_t>::result_type;
if (N <= std::numeric_limits<int>::max()) {
elementwise_kernel_with_index<int><<<grid, num_threads(), 0, stream>>>(N, f, output.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
elementwise_kernel_with_index<int64_t><<<grid, num_threads(), 0, stream>>>(N, f, output.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
} // namespace
namespace at {
namespace native {
Tensor& linspace_cuda_out(const Scalar& start, const Scalar& end, int64_t steps, Tensor& result) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
// Cast `end` and `start` to `float`, since range can be larger than scalar_t for integral types
float step = (static_cast<float>(scalar_end) - static_cast<float>(scalar_start)) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& logspace_cuda_out(const Scalar& start, const Scalar& end, int64_t steps, double base, Tensor& result) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
if (isComplexType(r.scalar_type())){
r.fill_(std::pow(base, start.to<c10::complex<double>>()));
} else {
r.fill_(std::pow(base, start.to<double>()));
}
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "logspace_cuda", [&]() {
float scalar_base = static_cast<float>(base); // Use float to avoid promotion to double
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
float step = static_cast<float>(scalar_end - scalar_start) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return std::pow(scalar_base, scalar_start + step * ind);
}
return std::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return std::pow(scalar_base, scalar_start + step * ind);
}
return std::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& range_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
Tensor& arange_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
// we don't want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = std::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
}} // namespace at::native
|
5129e9278ae37c25f82ac407ff0bf24bc71cd0d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
//unsigned int radius;
#define radius 16
#define FILTER_LENGTH (2 * radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 6
#define tileRH 1
#define tileRW 512
#define tileCH 16
#define tileCW 16
typedef float numid;
__constant__ numid d_Filter[FILTER_LENGTH];
__global__ void tiledConvRowGPU(numid *d_Dst, numid *d_Src, int imageW, int imageH){
int k;
numid sum = 0;
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
int row = blockDim.y*by + ty ;
int col = blockDim.x*bx + tx;
int newImageW = imageW + radius * 2;
__shared__ numid ShMemory[tileRH] [tileRW + 2 * radius];
if(tx-radius<0){ //Near Left Bounds
ShMemory[ty][tx] = d_Src[(row+radius) * newImageW + col];
}
ShMemory[ty][tx+radius] = d_Src[(row+radius) * newImageW + col + radius]; //Center
if(tx >= (tileRW - radius)){
ShMemory[ty] [tx + 2 * radius] = d_Src[(row+radius) * newImageW + col + 2 * radius]; //Near Right Bounds
}
__syncthreads();
for (k = -radius; k <= radius; k++) {
sum += ShMemory[ty][tx+k+radius] * d_Filter[radius - k];
}
d_Dst[(row+radius) * newImageW + col+radius] = sum;
}
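// [Editorial note, illustrative only] Shared-memory layout of the row tile above, assuming
// the defaults tileRW = 512 and radius = 16: every thread stores its centre element at
// ShMemory[ty][tx + radius]; threads with tx < radius additionally load the left halo and
// threads with tx >= tileRW - radius the right halo, so one tile row holds
// tileRW + 2 * radius = 544 values read from the zero-padded input.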
__global__ void tiledConvColGPU(numid *d_Dst, numid *d_Src, int imageW, int imageH){
int k;
numid sum = 0;
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
int row = blockDim.y*by + ty ;
int col = blockDim.x*bx + tx;
int newImageW = imageW + radius * 2;
__shared__ numid ShMemory[tileCH + 2 * radius][ tileCW];
if(ty-radius<0){ //Upper Bounds
ShMemory[ty] [tx] = d_Src[row * newImageW + col + radius];
}
ShMemory[ty + radius][ tx ] = d_Src[(row + radius) * newImageW + col + radius ]; //Center
ShMemory[ty + 2 * radius ][ tx ] = d_Src[(row + 2* radius) * newImageW + col + radius ]; //Lower Bounds
__syncthreads();
for (k = -radius; k <= radius; k++) {
sum += ShMemory[(ty + k + radius)][tx] * d_Filter[radius - k];
}
d_Dst[ (row + radius) * newImageW + col + radius] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(numid *h_Dst, numid *h_Src, numid *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
numid sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(numid *h_Dst, numid *h_Src, numid *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
numid sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
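// [Editorial note, illustrative only] The CPU references above treat out-of-range taps as
// zero by skipping them, while the GPU path below obtains the same result by copying the
// image into h_PadInput, a buffer padded with `radius` zero rows/columns on every side, and
// indexing with a +radius offset; both therefore compute the same zero-padded separable
// convolution, which is what the final accuracy check relies on.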
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
hipSetDevice(0);
numid
*h_Filter,
*h_Input,
*h_PadInput,
*h_Buffer,
*h_OutputCPU,
*d_Input,
*d_Buffer,
*d_OutputGPU,
*result;
struct timespec tv1, tv2;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int imageW;
int imageH;
unsigned int i,j;
if(argc<2){
printf("Please specify the image size as execution arguments\n");
return 0;
}
imageW=atoi(argv[1]);
// The user supplies imageW and imageH, and we assume they are equal,
// i.e. imageW = imageH = N, where N is given by the user.
// For simplicity we assume square images.
// printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
// scanf("%d", &imageW);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
// It would be a good idea to also check the result of each malloc...
h_Filter = (numid *)malloc(FILTER_LENGTH * sizeof(numid));
h_Input = (numid *)malloc(imageW * imageH * sizeof(numid));
h_PadInput = (numid *)malloc((imageW+radius*2 )*(2*radius+ imageH) * sizeof(numid)) ;
h_Buffer = (numid *)malloc(imageW * imageH * sizeof(numid));
h_OutputCPU = (numid *)malloc(imageW * imageH * sizeof(numid));
result = (numid *)malloc((imageW+2*radius) * (imageH+2*radius) * sizeof(numid));
hipMalloc(&d_Input,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
hipMalloc(&d_Buffer,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
hipMemset(d_Buffer,0,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
hipMalloc(&d_OutputGPU,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
if(d_Input==NULL || d_Buffer==NULL || d_OutputGPU==NULL){ // d_Filter is a __constant__ array, not a hipMalloc allocation, so it is not checked here
printf("Cuda Malloc Failed\n");
return 0;
}
hipMemset(d_OutputGPU,0,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
// 'h_Filter' is the filter with which the convolution is performed and it is
// initialized randomly. 'h_Input' is the image on which the convolution is
// performed and it is also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (numid)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (numid)rand() / ((numid)RAND_MAX / 255) + (numid)rand() / (numid)RAND_MAX;
}
// The part below runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, radius); // row-wise convolution
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, radius); // column-wise convolution
clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
printf ("CPU time = %10g seconds\n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
dim3 dimGridR(imageW/tileRW,imageH/tileRH);
dim3 dimBlockR(tileRW,tileRH);
dim3 dimGridC(imageW/tileCW,imageH/tileCH);
dim3 dimBlockC(tileCW,tileCH);
for(i=0;i<(imageW+2*radius)*(imageW+2*radius);i++){
h_PadInput[i]=0;
}
for(i=0;i<imageW;i++){
for(j=0;j<imageW;j++){
h_PadInput[(i+radius)*(2*radius+imageW)+j+radius]=h_Input[i*imageW+j];
}
}
printf("GPU computation... \n");
hipMemcpyToSymbol(d_Filter, h_Filter,FILTER_LENGTH*sizeof(numid));
hipMemcpy(d_Input,h_PadInput,(imageH+2*radius)*(imageW+2*radius)*sizeof(numid),hipMemcpyHostToDevice);
hipEventRecord(start,0);
hipLaunchKernelGGL(( tiledConvRowGPU) , dim3(dimGridR), dim3(dimBlockR) , 0, 0, d_Buffer, d_Input, imageW, imageH);
hipDeviceSynchronize();
hipError_t error=hipGetLastError();
if(error!=hipSuccess){
printf("Cuda Error:%s\n",hipGetErrorString(error));
hipDeviceReset();
return 0;
}
hipLaunchKernelGGL(( tiledConvColGPU) , dim3(dimGridC), dim3(dimBlockC) , 0, 0, d_OutputGPU, d_Buffer , imageW, imageH);
hipDeviceSynchronize();
error=hipGetLastError();
if(error!=hipSuccess){
printf("Cuda Error:%s\n",hipGetErrorString(error));
hipDeviceReset();
return 0;
}
hipEventRecord(stop,0);
hipMemcpy(result,d_OutputGPU,(imageH+2*radius)*(imageW+2*radius)*sizeof(numid),hipMemcpyDeviceToHost);
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed,start,stop);
printf("GPU time :%f ms.\n",elapsed);
    // Compare the GPU and CPU results; if even one value differs by more than the
    // accuracy we have defined, there is an error and the program could be terminated.
    // (The GPU output 'result' is padded, hence the radius offset into its indices.)
for(i=0;i<imageW;i++){
for(j=0;j<imageH;j++){
numid diff= h_OutputCPU[i*imageW+j]-result[(i+radius)*(imageW+2*radius)+j+radius];
if(ABS(diff)>accuracy){
printf("sfalma akriveias %f",ABS(diff));
}
}
}
    // free all the allocated memory
    free(h_OutputCPU);
    free(h_Buffer);
    free(h_Input);
    free(h_Filter);
    free(h_PadInput);
    free(result);
hipFree(d_OutputGPU);
hipFree(d_Buffer);
hipFree(d_Input);
hipFree(d_Filter);
hipDeviceReset();
return 0;
}
| 5129e9278ae37c25f82ac407ff0bf24bc71cd0d8.cu | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
//unsigned int radius;
#define radius 16
#define FILTER_LENGTH (2 * radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 6
#define tileRH 1
#define tileRW 512
#define tileCH 16
#define tileCW 16
typedef float numid;
__constant__ numid d_Filter[FILTER_LENGTH];
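// Both tiled kernels below stage a block's worth of the padded input in shared
// memory: each block loads its tile plus a radius-wide halo (left/centre/right
// for the row pass, top/centre/bottom for the column pass), synchronizes, and
// then every thread accumulates the FILTER_LENGTH products against the filter
// held in constant memory.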
__global__ void tiledConvRowGPU(numid *d_Dst, numid *d_Src, int imageW, int imageH){
int k;
numid sum = 0;
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
int row = blockDim.y*by + ty ;
int col = blockDim.x*bx + tx;
int newImageW = imageW + radius * 2;
__shared__ numid ShMemory[tileRH] [tileRW + 2 * radius];
if(tx-radius<0){ //Near Left Bounds
ShMemory[ty][tx] = d_Src[(row+radius) * newImageW + col];
}
ShMemory[ty][tx+radius] = d_Src[(row+radius) * newImageW + col + radius]; //Center
if(tx >= (tileRW - radius)){
ShMemory[ty] [tx + 2 * radius] = d_Src[(row+radius) * newImageW + col + 2 * radius]; //Near Right Bounds
}
__syncthreads();
for (k = -radius; k <= radius; k++) {
sum += ShMemory[ty][tx+k+radius] * d_Filter[radius - k];
}
d_Dst[(row+radius) * newImageW + col+radius] = sum;
}
__global__ void tiledConvColGPU(numid *d_Dst, numid *d_Src, int imageW, int imageH){
int k;
numid sum = 0;
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
int row = blockDim.y*by + ty ;
int col = blockDim.x*bx + tx;
int newImageW = imageW + radius * 2;
__shared__ numid ShMemory[tileCH + 2 * radius][ tileCW];
if(ty-radius<0){ //Upper Bounds
ShMemory[ty] [tx] = d_Src[row * newImageW + col + radius];
}
ShMemory[ty + radius][ tx ] = d_Src[(row + radius) * newImageW + col + radius ]; //Center
ShMemory[ty + 2 * radius ][ tx ] = d_Src[(row + 2* radius) * newImageW + col + radius ]; //Lower Bounds
__syncthreads();
for (k = -radius; k <= radius; k++) {
sum += ShMemory[(ty + k + radius)][tx] * d_Filter[radius - k];
}
d_Dst[ (row + radius) * newImageW + col + radius] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(numid *h_Dst, numid *h_Src, numid *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
numid sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(numid *h_Dst, numid *h_Src, numid *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
numid sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
cudaSetDevice(0);
numid
*h_Filter,
*h_Input,
*h_PadInput,
*h_Buffer,
*h_OutputCPU,
*d_Input,
*d_Buffer,
*d_OutputGPU,
*result;
struct timespec tv1, tv2;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int imageW;
int imageH;
unsigned int i,j;
if(argc<2){
printf("Please specify the image size as execution arguments\n");
return 0;
}
imageW=atoi(argv[1]);
    // imageW and imageH are supplied by the user and are assumed to be equal,
    // i.e. imageW = imageH = N, where N is given by the user.
    // For simplicity we only consider square images.
// printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
// scanf("%d", &imageW);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
    // It would be a good idea to also check the return value of each malloc...
h_Filter = (numid *)malloc(FILTER_LENGTH * sizeof(numid));
h_Input = (numid *)malloc(imageW * imageH * sizeof(numid));
h_PadInput = (numid *)malloc((imageW+radius*2 )*(2*radius+ imageH) * sizeof(numid)) ;
h_Buffer = (numid *)malloc(imageW * imageH * sizeof(numid));
h_OutputCPU = (numid *)malloc(imageW * imageH * sizeof(numid));
result = (numid *)malloc((imageW+2*radius) * (imageH+2*radius) * sizeof(numid));
cudaMalloc(&d_Input,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
cudaMalloc(&d_Buffer,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
cudaMemset(d_Buffer,0,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
cudaMalloc(&d_OutputGPU,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
if(d_Filter==NULL || d_Input==NULL || d_Buffer==NULL || d_OutputGPU==NULL){
printf("Cuda Malloc Failed\n");
return 0;
}
cudaMemset(d_OutputGPU,0,(imageW+2*radius)*(imageH+2*radius)*sizeof(numid));
    // 'h_Filter' is the filter used for the convolution and is initialized
    // randomly. 'h_Input' is the image on which the convolution is performed,
    // and it is also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (numid)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (numid)rand() / ((numid)RAND_MAX / 255) + (numid)rand() / (numid)RAND_MAX;
}
    // The part below runs on the CPU; its result is the reference against which the GPU output must be compared.
printf("CPU computation...\n");
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, radius); // row-wise convolution
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, radius); // column-wise convolution
clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
printf ("CPU time = %10g seconds\n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
dim3 dimGridR(imageW/tileRW,imageH/tileRH);
dim3 dimBlockR(tileRW,tileRH);
dim3 dimGridC(imageW/tileCW,imageH/tileCH);
dim3 dimBlockC(tileCW,tileCH);
for(i=0;i<(imageW+2*radius)*(imageW+2*radius);i++){
h_PadInput[i]=0;
}
for(i=0;i<imageW;i++){
for(j=0;j<imageW;j++){
h_PadInput[(i+radius)*(2*radius+imageW)+j+radius]=h_Input[i*imageW+j];
}
}
printf("GPU computation... \n");
cudaMemcpyToSymbol(d_Filter, h_Filter,FILTER_LENGTH*sizeof(numid));
cudaMemcpy(d_Input,h_PadInput,(imageH+2*radius)*(imageW+2*radius)*sizeof(numid),cudaMemcpyHostToDevice);
cudaEventRecord(start,0);
tiledConvRowGPU <<< dimGridR, dimBlockR >>>(d_Buffer, d_Input, imageW, imageH);
cudaThreadSynchronize();
cudaError_t error=cudaGetLastError();
if(error!=cudaSuccess){
printf("Cuda Error:%s\n",cudaGetErrorString(error));
cudaDeviceReset();
return 0;
}
tiledConvColGPU <<< dimGridC, dimBlockC >>>(d_OutputGPU, d_Buffer , imageW, imageH);
cudaThreadSynchronize();
error=cudaGetLastError();
if(error!=cudaSuccess){
printf("Cuda Error:%s\n",cudaGetErrorString(error));
cudaDeviceReset();
return 0;
}
cudaEventRecord(stop,0);
cudaMemcpy(result,d_OutputGPU,(imageH+2*radius)*(imageW+2*radius)*sizeof(numid),cudaMemcpyDeviceToHost);
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed,start,stop);
printf("GPU time :%f ms.\n",elapsed);
    // Compare the GPU and CPU results; if even one value differs by more than the
    // accuracy we have defined, there is an error and the program could be terminated.
    // (The GPU output 'result' is padded, hence the radius offset into its indices.)
for(i=0;i<imageW;i++){
for(j=0;j<imageH;j++){
numid diff= h_OutputCPU[i*imageW+j]-result[(i+radius)*(imageW+2*radius)+j+radius];
if(ABS(diff)>accuracy){
printf("sfalma akriveias %f",ABS(diff));
}
}
}
    // free all the allocated memory
    free(h_OutputCPU);
    free(h_Buffer);
    free(h_Input);
    free(h_Filter);
    free(h_PadInput);
    free(result);
cudaFree(d_OutputGPU);
cudaFree(d_Buffer);
cudaFree(d_Input);
cudaFree(d_Filter);
cudaDeviceReset();
return 0;
}
|
daaef4f47141a6e6c31c5f43cbd9a557f0bbb655.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include <cassert>
#include <cstdio>
#include <math.h>
#include <memory>
#include "matx.h"
using namespace matx;
#define FFT_TYPE HIPFFT_C2C
/** Create a spectrogram of a signal
*
* This example creates a set of data representing signal power versus frequency
* and time. Traditionally the signal power is plotted as the Z dimension using
* color, and time/frequency are the X/Y axes. The time taken to run the
* spectrogram is computed, and a simple scatter plot is output. This version
* does not use CUDA graphs, and kernel launches are launched in a loop
* asynchronously from the host. See spectrogram_graph.cu for a version using
* CUDA graphs, which gives a performance boost by launching a graph once per
* iteration instead of separate kernels.
*/
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
MATX_ENTER_HANDLER();
using complex = cuda::std::complex<float>;
hipStream_t stream;
hipStreamCreate(&stream);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float fs = 10000;
constexpr index_t N = 100000;
float amp = static_cast<float>(2 * sqrt(2));
constexpr index_t nperseg = 256;
constexpr index_t nfft = 256;
constexpr index_t noverlap = nperseg / 8;
constexpr index_t nstep = nperseg - noverlap;
constexpr uint32_t num_iterations = 100;
float time_ms;
std::array<index_t, 1> num_samps{N};
std::array<index_t, 1> half_win{nfft / 2 + 1};
std::array<index_t, 1> s_time_shape{(N - noverlap) / nstep};
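  // With N = 100000, nperseg = 256, noverlap = nperseg/8 = 32 and nstep = 224,
  // the overlapped view built below yields (N - noverlap)/nstep = 446 segments
  // of nfft/2 + 1 = 129 one-sided frequency bins each.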
auto time = make_tensor<float>({N});
auto modulation = make_tensor<float>({N});
auto carrier = make_tensor<float>({N});
auto noise = make_tensor<float>({N});
auto x = make_tensor<float>({N});
auto freqs = make_tensor<float>({nfft / 2 + 1});
auto fftStackedMatrix = make_tensor<complex>({(N - noverlap) / nstep, nfft / 2 + 1});
auto s_time = make_tensor<float>({(N - noverlap) / nstep});
// Set up all static buffers
// time = np.arange(N) / float(fs)
(time = linspace<0>(num_samps, 0.0f, static_cast<float>(N) - 1.0f) / fs)
.run(stream);
// mod = 500 * np.cos(2*np.pi*0.25*time)
(modulation = 500 * cos(2 * M_PI * 0.25 * time)).run(stream);
// carrier = amp * np.sin(2*np.pi*3e3*time + modulation)
(carrier = amp * sin(2 * M_PI * 3000 * time + modulation)).run(stream);
// noise = 0.01 * fs / 2 * np.random.randn(time.shape)
(noise = sqrt(0.01 * fs / 2) * random<float>({N}, NORMAL)).run(stream);
// noise *= np.exp(-time/5)
(noise = noise * exp(-1.0f * time / 5.0f)).run(stream);
// x = carrier + noise
(x = carrier + noise).run(stream);
for (uint32_t i = 0; i < num_iterations; i++) {
if (i == 2) { // Start timer on third loop to allow generation of plot
hipEventRecord(start, stream);
}
// DFT Sample Frequencies (rfftfreq)
(freqs = (1.0 / (static_cast<float>(nfft) * 1 / fs)) *
linspace<0>(half_win, 0.0f, static_cast<float>(nfft) / 2.0f))
.run(stream);
// Create overlapping matrix of segments.
auto stackedMatrix = overlap(x, {nperseg}, {nstep});
// FFT along rows
(fftStackedMatrix = fft(stackedMatrix)).run(stream);
    // Power: squared magnitude, computed as conj(X) * X (the real part holds |X|^2)
(fftStackedMatrix = conj(fftStackedMatrix) * fftStackedMatrix)
.run(stream);
// Get real part and transpose
auto Sxx = fftStackedMatrix.RealView().Permute({1, 0});
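    // Sxx now holds the (unwindowed, unscaled) one-sided power of each segment,
    // transposed to frequency x time for plotting.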
// Spectral time axis
(s_time = linspace<0>(s_time_shape, static_cast<float>(nperseg) / 2.0f,
static_cast<float>(N - nperseg) / 2.0f + 1) /
fs)
.run(stream);
if (i == 1) {
#if MATX_ENABLE_VIZ
// Generate a spectrogram visualization using a contour plot
viz::contour(time, freqs, Sxx);
#else
printf("Not outputting plot since visualizations disabled\n");
#endif
}
}
hipEventRecord(stop, stream);
hipStreamSynchronize(stream);
hipEventElapsedTime(&time_ms, start, stop);
printf("Spectrogram Time Without Graphs = %.2fus per iteration\n",
time_ms * 1e3 / num_iterations);
hipEventDestroy(start);
hipEventDestroy(stop);
hipStreamDestroy(stream);
CUDA_CHECK_LAST_ERROR();
MATX_EXIT_HANDLER();
}
| daaef4f47141a6e6c31c5f43cbd9a557f0bbb655.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include <cassert>
#include <cstdio>
#include <math.h>
#include <memory>
#include "matx.h"
using namespace matx;
#define FFT_TYPE CUFFT_C2C
/** Create a spectrogram of a signal
*
* This example creates a set of data representing signal power versus frequency
* and time. Traditionally the signal power is plotted as the Z dimension using
* color, and time/frequency are the X/Y axes. The time taken to run the
* spectrogram is computed, and a simple scatter plot is output. This version
* does not use CUDA graphs, and kernel launches are launched in a loop
* asynchronously from the host. See spectrogram_graph.cu for a version using
* CUDA graphs, which gives a performance boost by launching a graph once per
* iteration instead of separate kernels.
*/
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
MATX_ENTER_HANDLER();
using complex = cuda::std::complex<float>;
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float fs = 10000;
constexpr index_t N = 100000;
float amp = static_cast<float>(2 * sqrt(2));
constexpr index_t nperseg = 256;
constexpr index_t nfft = 256;
constexpr index_t noverlap = nperseg / 8;
constexpr index_t nstep = nperseg - noverlap;
constexpr uint32_t num_iterations = 100;
float time_ms;
std::array<index_t, 1> num_samps{N};
std::array<index_t, 1> half_win{nfft / 2 + 1};
std::array<index_t, 1> s_time_shape{(N - noverlap) / nstep};
auto time = make_tensor<float>({N});
auto modulation = make_tensor<float>({N});
auto carrier = make_tensor<float>({N});
auto noise = make_tensor<float>({N});
auto x = make_tensor<float>({N});
auto freqs = make_tensor<float>({nfft / 2 + 1});
auto fftStackedMatrix = make_tensor<complex>({(N - noverlap) / nstep, nfft / 2 + 1});
auto s_time = make_tensor<float>({(N - noverlap) / nstep});
// Set up all static buffers
// time = np.arange(N) / float(fs)
(time = linspace<0>(num_samps, 0.0f, static_cast<float>(N) - 1.0f) / fs)
.run(stream);
// mod = 500 * np.cos(2*np.pi*0.25*time)
(modulation = 500 * cos(2 * M_PI * 0.25 * time)).run(stream);
// carrier = amp * np.sin(2*np.pi*3e3*time + modulation)
(carrier = amp * sin(2 * M_PI * 3000 * time + modulation)).run(stream);
// noise = 0.01 * fs / 2 * np.random.randn(time.shape)
(noise = sqrt(0.01 * fs / 2) * random<float>({N}, NORMAL)).run(stream);
// noise *= np.exp(-time/5)
(noise = noise * exp(-1.0f * time / 5.0f)).run(stream);
// x = carrier + noise
(x = carrier + noise).run(stream);
for (uint32_t i = 0; i < num_iterations; i++) {
if (i == 2) { // Start timer on third loop to allow generation of plot
cudaEventRecord(start, stream);
}
// DFT Sample Frequencies (rfftfreq)
(freqs = (1.0 / (static_cast<float>(nfft) * 1 / fs)) *
linspace<0>(half_win, 0.0f, static_cast<float>(nfft) / 2.0f))
.run(stream);
// Create overlapping matrix of segments.
auto stackedMatrix = overlap(x, {nperseg}, {nstep});
// FFT along rows
(fftStackedMatrix = fft(stackedMatrix)).run(stream);
    // Power: squared magnitude, computed as conj(X) * X (the real part holds |X|^2)
(fftStackedMatrix = conj(fftStackedMatrix) * fftStackedMatrix)
.run(stream);
// Get real part and transpose
auto Sxx = fftStackedMatrix.RealView().Permute({1, 0});
// Spectral time axis
(s_time = linspace<0>(s_time_shape, static_cast<float>(nperseg) / 2.0f,
static_cast<float>(N - nperseg) / 2.0f + 1) /
fs)
.run(stream);
if (i == 1) {
#if MATX_ENABLE_VIZ
// Generate a spectrogram visualization using a contour plot
viz::contour(time, freqs, Sxx);
#else
printf("Not outputting plot since visualizations disabled\n");
#endif
}
}
cudaEventRecord(stop, stream);
cudaStreamSynchronize(stream);
cudaEventElapsedTime(&time_ms, start, stop);
printf("Spectrogram Time Without Graphs = %.2fus per iteration\n",
time_ms * 1e3 / num_iterations);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamDestroy(stream);
CUDA_CHECK_LAST_ERROR();
MATX_EXIT_HANDLER();
}
|
54ce57d3372c4497d3299262a0a51c9bf66e996c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zmergecg.cu normal z -> c, Fri Sep 11 18:29:43 2015
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_c
// These routines merge multiple kernels from cmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
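// In this merged formulation each CG iteration needs only two host calls:
// magma_ccgmerge_spmv1 (the SpMV fused with the dot product d.(A*d) and the
// rho update) and magma_ccgmerge_xrbeta (the x/r updates fused with r.r, the
// alpha update and the new search direction). All scalars stay in the device
// array skp, so these routines never copy a scalar back to the host.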
// accelerated reduction for one vector
__global__ void
magma_ccgreduce_kernel_spmv1(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_ccgmerge_spmvcsr_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if( i<n ) {
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_ccgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ n * k + i ];
magmaFloatComplex val = dval [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_ccgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ num_cols_per_row * i + k ];
magmaFloatComplex val = dval [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_8(
int n,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T;  // index of this thread within its row (T threads per row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ) {
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 16 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_16(
int n,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T;  // index of this thread within its row (T threads per row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ) {
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 32 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_32(
int n,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
    int idp = idb%T;  // index of this thread within its row (T threads per row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ) {
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_ccgmerge_spmvellpackrt_kernel2(
int n,
magmaFloatComplex * z,
magmaFloatComplex * d,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_C_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_ccgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++) {
int col = dcolind [offset+ blocksize * n + Idx ];
magmaFloatComplex val = dval[offset+ blocksize * n + Idx];
if( val != 0) {
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
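// Layout implied by the indexing below: rows are grouped into slices of
// 'blocksize' rows, each slice is stored column-major (rows contiguous) and
// zero-padded to a multiple of blocksize*T values; drowptr[bdx] gives the
// offset of slice bdx, and the T threads assigned to a row combine their
// partial sums through shared memory.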
__global__ void
magma_ccgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * d,
magmaFloatComplex * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * d,
magmaFloatComplex * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * d,
magmaFloatComplex * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
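// Scalar workspace layout used by the kernels below (as read off from their
// bodies): skp[0] = alpha, skp[1] = beta = r.r, skp[2] = gamma (previous
// beta), skp[3] = rho, skp[4] = d.(A*d).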
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_ccg_rhokernel(
magmaFloatComplex * skp ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
magmaFloatComplex tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using different formats with the dot product
and the computation of rho
Arguments
---------
@param[in]
A magma_c_matrix
input matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
dd magmaFloatComplex_ptr
input vector d
@param[out]
dz magmaFloatComplex_ptr
input vector z
@param[out]
skp magmaFloatComplex_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_ccgmerge_spmv1(
magma_c_matrix A,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr dd,
magmaFloatComplex_ptr dz,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR )
hipLaunchKernelGGL(( magma_ccgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
hipLaunchKernelGGL(( magma_ccgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_SELLP ) {
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = int( sqrt( float( A.numblocks )));
int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( magmaFloatComplex );
if ( A.alignment == 8)
hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_8)
, dim3(gridsellp), dim3(block), Mssellp, queue ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_16)
, dim3(gridsellp), dim3(block), Mssellp, queue ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_32)
, dim3(gridsellp), dim3(block), Mssellp, queue ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize);
int num_threads = A.alignment*A.blocksize;
int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment)
*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( float( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( magmaFloatComplex );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_32)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_16)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_8)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(A.alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, dz, dd, d1 );
}
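    // Finish the dot product with a ping-pong reduction between d1 and d2:
    // every pass halves the number of partial sums until a single value is
    // left in aux1, which is then copied into skp[4] before the rho update.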
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+4, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_ccg_rhokernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
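/*
    A minimal host-side sketch of how this routine is meant to be driven inside
    a CG iteration (illustrative only: the solver loop, the workspace allocation
    and the convergence test are assumptions, not part of this file):

        // d1, d2: reduction workspaces with at least one entry per thread block
        // skp   : device array of at least 5 magmaFloatComplex scalars
        magma_ccgmerge_spmv1( A, d1, d2, dd, dz, skp, queue );
        // now skp[4] = d.(A*d) and skp[3] = rho = skp[1]/skp[4]
        magma_ccgmerge_xrbeta( A.num_rows, d1, d2, dx, dr, dd, dz, skp, queue );
        // now skp[1] = new r.r, skp[0] = alpha, and dd holds the updated d
*/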
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_ccgmerge_xrbeta_kernel(
int n,
magmaFloatComplex * x,
magmaFloatComplex * r,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * skp,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
magmaFloatComplex rho = skp[3];
magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_ccg_alphabetakernel(
magmaFloatComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
magmaFloatComplex tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_C_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_ccg_d_kernel(
int n,
magmaFloatComplex * skp,
magmaFloatComplex * r,
magmaFloatComplex * d )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaFloatComplex alpha = skp[0];
if( i<n ) {
d[i] = r[i] + alpha * d[i];
}
}
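// Taken together, the three kernels above implement the merged update
//   x += rho*d,   r -= rho*z,   beta = r.r,   alpha = beta/gamma,
//   d = r + alpha*d
// which magma_ccgmerge_xrbeta below launches back to back, finishing the beta
// reduction with magma_ccgreduce_kernel_spmv1.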
/**
Purpose
-------
Merges the update of r and x with the dot product and performs then
the update for the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in,out]
dx magmaFloatComplex_ptr
input vector x
@param[in,out]
dr magmaFloatComplex_ptr
input/output vector r
@param[in]
dd magmaFloatComplex_ptr
input vector d
@param[in]
dz magmaFloatComplex_ptr
input vector z
@param[in]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_csygpuk
********************************************************************/
extern "C" magma_int_t
magma_ccgmerge_xrbeta(
int n,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dr,
magmaFloatComplex_ptr dd,
magmaFloatComplex_ptr dz,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_ccgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0,
n, dx, dr, dd, dz, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+1, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_ccg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_ccg_d_kernel), dim3(Gs3), dim3(Bs3), 0, 0, n, skp, dr, dd );
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
| 54ce57d3372c4497d3299262a0a51c9bf66e996c.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zmergecg.cu normal z -> c, Fri Sep 11 18:29:43 2015
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_c
// These routines merge multiple kernels from cmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
// accelerated reduction for one vector
__global__ void
magma_ccgreduce_kernel_spmv1(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_ccgmerge_spmvcsr_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if( i<n ) {
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_ccgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ n * k + i ];
magmaFloatComplex val = dval [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_ccgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ num_cols_per_row * i + k ];
magmaFloatComplex val = dval [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_8(
int n,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // thread index within the row (T threads are assigned to each row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
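// Intra-row reduction of the T (= 8 for this kernel) partial sums in shared
// memory; only the first thread of each row writes the result to z[i].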
if( idp < 4 ) {
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
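// The _16 and _32 variants below differ from the kernel above only in the depth
// of the intra-row reduction (and in the expected value of T).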
// computes the SpMV using ELLRT, 16 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_16(
int n,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // thread index within the row (T threads are assigned to each row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ) {
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 32 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_32(
int n,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // thread index within the row (T threads are assigned to each row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ) {
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_ccgmerge_spmvellpackrt_kernel2(
int n,
magmaFloatComplex * z,
magmaFloatComplex * d,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_C_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_ccgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++) {
int col = dcolind [offset+ blocksize * n + Idx ];
magmaFloatComplex val = dval[offset+ blocksize * n + Idx];
if( val != 0) {
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP:
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * d,
magmaFloatComplex * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP:
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * d,
magmaFloatComplex * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP:
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * d,
magmaFloatComplex * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_ccg_rhokernel(
magmaFloatComplex * skp ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
magmaFloatComplex tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
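// Layout of the scalar workspace skp[] as used by the kernels in this file
// (inferred from the code): skp[0]=alpha, skp[1]=beta, skp[2]=gamma,
// skp[3]=rho, skp[4]=temporary dot product d^T*(A*d).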
/**
Purpose
-------
Merges the first SpMV using different formats with the dot product
and the computation of rho
Arguments
---------
@param[in]
A magma_c_matrix
input matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
dd magmaFloatComplex_ptr
input vector d
@param[out]
dz magmaFloatComplex_ptr
output vector z (= A*d)
@param[out]
skp magmaFloatComplex_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_ccgmerge_spmv1(
magma_c_matrix A,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr dd,
magmaFloatComplex_ptr dz,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR )
magma_ccgmerge_spmvcsr_kernel<<<Gs, Bs, Ms, queue >>>
( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
magma_ccgmerge_spmvellpack_kernel<<<Gs, Bs, Ms, queue >>>
( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
magma_ccgmerge_spmvell_kernel<<<Gs, Bs, Ms, queue >>>
( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_SELLP ) {
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = int( sqrt( float( A.numblocks )));
int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( magmaFloatComplex );
if ( A.alignment == 8)
magma_ccgmerge_spmvsellpt_kernel_8
<<< gridsellp, block, Mssellp, queue >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
magma_ccgmerge_spmvsellpt_kernel_16
<<< gridsellp, block, Mssellp, queue >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
magma_ccgmerge_spmvsellpt_kernel_32
<<< gridsellp, block, Mssellp, queue >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_ccgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, queue >>>
( A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize);
int num_threads = A.alignment*A.blocksize;
int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment)
*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( float( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( magmaFloatComplex );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
magma_ccgmerge_spmvellpackrt_kernel_32
<<< gridellrt, num_threads , Mellrt, queue >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
magma_ccgmerge_spmvellpackrt_kernel_16
<<< gridellrt, num_threads , Mellrt, queue >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
magma_ccgmerge_spmvellpackrt_kernel_8
<<< gridellrt, num_threads , Mellrt, queue >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(A.alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_ccgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, queue >>>
( A.num_rows, dz, dd, d1 );
}
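// Cross-block reduction of the per-block partial sums: aux1/aux2 ping-pong
// between d1 and d2 until a single value remains, which is copied into skp[4]
// and then turned into rho by magma_ccg_rhokernel.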
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_ccgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+4, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_ccg_rhokernel<<<Gs2, Bs2, 0>>>( skp );
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
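// Usage sketch (illustrative only): a merged CG iteration calls
//     magma_ccgmerge_spmv1( A, d1, d2, dd, dz, skp, queue );
// which computes dz = A*dd, reduces the dot product dd^T*dz into skp[4],
// and stores rho = skp[1]/skp[4] in skp[3] (cf. magma_ccg_rhokernel).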
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_ccgmerge_xrbeta_kernel(
int n,
magmaFloatComplex * x,
magmaFloatComplex * r,
magmaFloatComplex * d,
magmaFloatComplex * z,
magmaFloatComplex * skp,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
magmaFloatComplex rho = skp[3];
magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_ccg_alphabetakernel(
magmaFloatComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
magmaFloatComplex tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_C_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_ccg_d_kernel(
int n,
magmaFloatComplex * skp,
magmaFloatComplex * r,
magmaFloatComplex * d )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaFloatComplex alpha = skp[0];
if( i<n ) {
d[i] = r[i] + alpha * d[i];
}
}
/**
Purpose
-------
Merges the update of r and x with the dot product r^T*r and then performs
the update of the Krylov search direction d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in,out]
dx magmaFloatComplex_ptr
input/output vector x
@param[in,out]
dr magmaFloatComplex_ptr
input/output vector r
@param[in]
dd magmaFloatComplex_ptr
input vector d
@param[in]
dz magmaFloatComplex_ptr
input vector z
@param[in]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_csygpuk
********************************************************************/
extern "C" magma_int_t
magma_ccgmerge_xrbeta(
int n,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dr,
magmaFloatComplex_ptr dd,
magmaFloatComplex_ptr dz,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_ccgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>>
( n, dx, dr, dd, dz, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_ccgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+1, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_ccg_alphabetakernel<<<Gs2, Bs2, 0>>>( skp );
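// After this kernel, skp[0] = skp[1]/skp[2] (see magma_ccg_alphabetakernel);
// it is used below to update the search direction d = r + skp[0]*d.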
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_ccg_d_kernel<<<Gs3, Bs3, 0>>>( n, skp, dr, dd );
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
61434c843874ad8b09403ac062f999bf176e76c9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "fcns.h"
#include <stdio.h>
#include <stdlib.h>
// Definition of the host-side Matrix Filter function (copies data to the device, launches the kernel, returns elapsed seconds)
float MatFilter(const Matrix filter,const Matrix oldmat, Matrix newmat)
{
// Load to device memory
Matrix d_filter;
d_filter.width = filter.width;
d_filter.height = filter.height;
size_t size = filter.width * filter.height * sizeof(double);
hipMalloc(&d_filter.elements, size);
hipMemcpy(d_filter.elements, filter.elements, size,
hipMemcpyHostToDevice);
Matrix d_oldmat;
d_oldmat.width = oldmat.width;
d_oldmat.height = oldmat.height;
size = oldmat.width * oldmat.height * sizeof(double);
hipMalloc(&d_oldmat.elements, size);
hipMemcpy(d_oldmat.elements, oldmat.elements, size,
hipMemcpyHostToDevice);
Matrix d_newmat;
d_newmat.width = newmat.width;
d_newmat.height = newmat.height;
size = newmat.width * newmat.height * sizeof(double);
hipMalloc(&d_newmat.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((newmat.width+BLOCK_SIZE-1) / dimBlock.x,
(newmat.height+BLOCK_SIZE-1)/ dimBlock.y);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MatFilterKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_filter, d_oldmat, d_newmat);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapseTime;
hipEventElapsedTime(&elapseTime, start, stop);
// Read newmat back from device memory
hipMemcpy(newmat.elements, d_newmat.elements, size,
hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_filter.elements);
hipFree(d_oldmat.elements);
hipFree(d_newmat.elements);
return elapseTime/1000.0;
}
// Definition of the Matrix Filter kernel (runs on the GPU)
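// Note: the kernel performs no bounds check on the oldmat reads, so oldmat is
// assumed to be at least (newmat.height + filter.height - 1) x
// (newmat.width + filter.width - 1), i.e. a "valid" correlation.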
__global__ void MatFilterKernel(Matrix filter, Matrix oldmat, Matrix newmat)
{
// Each thread computes one element of newmat
// by accumulating results into tempval
double tempval = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Guard against the extra threads created by the rounded-up grid;
// without this check they would write outside newmat.
if (row >= newmat.height || col >= newmat.width) return;
// Apply Filter by row and column
for(int k=0;k<filter.height;k++){
for(int m=0; m<filter.width; m++){
tempval+=oldmat.elements[(row+k)*oldmat.width+(col+m)]
*filter.elements[k*filter.width+m];
}
}
newmat.elements[row * newmat.width + col] = tempval;
}
void MatPrint(const Matrix M){
for(int i=0; i<M.height; i++){
for(int j=0; j<M.width; j++){
printf("%f ",M.elements[i*M.width+j]);
}
printf("\n");
}
}
| 61434c843874ad8b09403ac062f999bf176e76c9.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include "fcns.h"
#include <stdio.h>
#include <stdlib.h>
// Definition of the host-side Matrix Filter function (copies data to the device, launches the kernel, returns elapsed seconds)
float MatFilter(const Matrix filter,const Matrix oldmat, Matrix newmat)
{
// Load to device memory
Matrix d_filter;
d_filter.width = filter.width;
d_filter.height = filter.height;
size_t size = filter.width * filter.height * sizeof(double);
cudaMalloc(&d_filter.elements, size);
cudaMemcpy(d_filter.elements, filter.elements, size,
cudaMemcpyHostToDevice);
Matrix d_oldmat;
d_oldmat.width = oldmat.width;
d_oldmat.height = oldmat.height;
size = oldmat.width * oldmat.height * sizeof(double);
cudaMalloc(&d_oldmat.elements, size);
cudaMemcpy(d_oldmat.elements, oldmat.elements, size,
cudaMemcpyHostToDevice);
Matrix d_newmat;
d_newmat.width = newmat.width;
d_newmat.height = newmat.height;
size = newmat.width * newmat.height * sizeof(double);
cudaMalloc(&d_newmat.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((newmat.width+BLOCK_SIZE-1) / dimBlock.x,
(newmat.height+BLOCK_SIZE-1)/ dimBlock.y);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
MatFilterKernel<<<dimGrid, dimBlock>>>(d_filter, d_oldmat, d_newmat);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapseTime;
cudaEventElapsedTime(&elapseTime, start, stop);
// Read newmat back from device memory
cudaMemcpy(newmat.elements, d_newmat.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_filter.elements);
cudaFree(d_oldmat.elements);
cudaFree(d_newmat.elements);
return elapseTime/1000.0;
}
// Definition of the Matrix Filter kernel (runs on the GPU)
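// Note: the kernel performs no bounds check on the oldmat reads, so oldmat is
// assumed to be at least (newmat.height + filter.height - 1) x
// (newmat.width + filter.width - 1), i.e. a "valid" correlation.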
__global__ void MatFilterKernel(Matrix filter, Matrix oldmat, Matrix newmat)
{
// Each thread computes one element of newmat
// by accumulating results into tempval
double tempval = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Guard against the extra threads created by the rounded-up grid;
// without this check they would write outside newmat.
if (row >= newmat.height || col >= newmat.width) return;
// Apply Filter by row and column
for(int k=0;k<filter.height;k++){
for(int m=0; m<filter.width; m++){
tempval+=oldmat.elements[(row+k)*oldmat.width+(col+m)]
*filter.elements[k*filter.width+m];
}
}
newmat.elements[row * newmat.width + col] = tempval;
}
void MatPrint(const Matrix M){
for(int i=0; i<M.height; i++){
for(int j=0; j<M.width; j++){
printf("%f ",M.elements[i*M.width+j]);
}
printf("\n");
}
}
|
5ddec009e8a4da21d4268dad2e4234f31c9e35c9.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
* Expressions like wsum == 0 ? esum : esum / wsum are used to handle an empty dataset.
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
template <typename EvalRow>
class ElementWiseMetricsReduction {
public:
explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {}
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
bst_float residue_sum = 0;
bst_float weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
weights_sum += wt;
}
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
size_t n_data = preds.Size();
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_label = labels.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
bool const is_null_weight = weights.Size() == 0;
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
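// A single transform_reduce pass evaluates the metric for each element and
// accumulates (residue, weight) pairs through PackedReduceResult's plus operator.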
PackedReduceResult result = thrust::transform_reduce(
thrust::hip::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
residue *= weight;
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result = CpuReduceMetrics(weights, labels, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = device;
preds.SetDevice(device_);
labels.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(hipSetDevice(device_));
result = DeviceReduceMetrics(weights, labels, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMAPE {
const char *Name() const {
return "mape";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs((label - pred) / label);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * ::log(eps) - (1.0f - y) * ::log(1.0f - eps);
} else if (pneg < eps) {
return -y * ::log(1.0f - eps) - (1.0f - y) * ::log(eps);
} else {
return -y * ::log(py) - (1.0f - y) * ::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMPHE {
char const *Name() const {
return "mphe";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return std::sqrt( 1 + diff * diff) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - ::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalGammaDeviance {
const char *Name() const {
return "gamma-deviance";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float epsilon = 1.0e-9;
bst_float tmp = label / (pred + epsilon);
return tmp - ::log(tmp) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return 2 * esum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (y < eps) y = eps;
bst_float psi = 1.0;
bst_float theta = -1. / py;
bst_float a = psi;
bst_float b = -::log(-theta);
bst_float c = 1. / psi * ::log(y/psi) - ::log(y) - common::LogGamma(1. / psi);
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * ::exp((1 - rho_) * ::log(p)) / (1 - rho_);
bst_float b = ::exp((2 - rho_) * ::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
* \tparam Derived the name of subclass
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds);
double dat[2] { result.Residue(), result.Weights() };
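// dat[0] is the weighted residue sum, dat[1] the weight sum; with distributed
// training both are summed across workers before the final metric is formed.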
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_{policy_};
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(MAPE, "mape")
.describe("Mean absolute percentage error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAPE>(); });
XGBOOST_REGISTER_METRIC(MPHE, "mphe")
.describe("Mean Pseudo Huber error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMPHE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
| 5ddec009e8a4da21d4268dad2e4234f31c9e35c9.cu | /*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*
* Expressions like wsum == 0 ? esum : esum / wsum are used to handle an empty dataset.
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
template <typename EvalRow>
class ElementWiseMetricsReduction {
public:
explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {}
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
bst_float residue_sum = 0;
bst_float weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
weights_sum += wt;
}
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
size_t n_data = preds.Size();
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_label = labels.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
bool const is_null_weight = weights.Size() == 0;
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
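// A single transform_reduce pass evaluates the metric for each element and
// accumulates (residue, weight) pairs through PackedReduceResult's plus operator.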
PackedReduceResult result = thrust::transform_reduce(
thrust::cuda::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
residue *= weight;
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result = CpuReduceMetrics(weights, labels, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = device;
preds.SetDevice(device_);
labels.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(cudaSetDevice(device_));
result = DeviceReduceMetrics(weights, labels, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowRMSLE {
char const* Name() const {
return "rmsle";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = std::log1p(label) - std::log1p(pred);
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMAPE {
const char *Name() const {
return "mape";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs((label - pred) / label);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * std::log(eps) - (1.0f - y) * std::log(1.0f - eps);
} else if (pneg < eps) {
return -y * std::log(1.0f - eps) - (1.0f - y) * std::log(eps);
} else {
return -y * std::log(py) - (1.0f - y) * std::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalRowMPHE {
char const *Name() const {
return "mphe";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return std::sqrt( 1 + diff * diff) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - std::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalGammaDeviance {
const char *Name() const {
return "gamma-deviance";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float epsilon = 1.0e-9;
bst_float tmp = label / (pred + epsilon);
return tmp - std::log(tmp) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return 2 * esum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (y < eps) y = eps;
bst_float psi = 1.0;
bst_float theta = -1. / py;
bst_float a = psi;
bst_float b = -std::log(-theta);
bst_float c = 1. / psi * std::log(y/psi) - std::log(y) - common::LogGamma(1. / psi);
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
bst_float a = y * std::exp((1 - rho_) * std::log(p)) / (1 - rho_);
bst_float b = std::exp((2 - rho_) * std::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return wsum == 0 ? esum : esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
* \tparam Derived the name of subclass
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, info.weights_, info.labels_, preds);
double dat[2] { result.Residue(), result.Weights() };
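// dat[0] is the weighted residue sum, dat[1] the weight sum; with distributed
// training both are summed across workers before the final metric is formed.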
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_{policy_};
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Rooted mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(RMSLE, "rmsle")
.describe("Rooted mean square log error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSLE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(MAPE, "mape")
.describe("Mean absolute percentage error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAPE>(); });
XGBOOST_REGISTER_METRIC(MPHE, "mphe")
.describe("Mean Pseudo Huber error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMPHE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
|
93e0ce5e6e52a0edbb5938105dcadcb9b5a281a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/unpooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
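// MaxUnpoolForward scatters each pooled value either to the position stored in
// the pooling mask (when one is provided) or, without a mask, to the top-left
// corner of its pooling window, clamped to the bounds of the unpooled map.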
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int unpooled_height, const int unpooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
const Dtype* bottom_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % width;
int ph = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1));
int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1));
int unpooled_index = uph * unpooled_width + upw;
top_data += (n * channels + c) * unpooled_height * unpooled_width;
if (bottom_mask) {
const int mask_index = bottom_mask[index];
top_data[mask_index] = bottom_data[index];
} else {
top_data[unpooled_index] = bottom_data[index];
}
}
}
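// AveUnpoolForward: every unpooled element accumulates the contribution of each
// pooling window that covers it, each contribution divided by that window's size.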
template <typename Dtype>
__global__ void AveUnpoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int unpooled_height,
const int unpooled_width, const int height, const int width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % unpooled_width + pad_w;
int h = (index / unpooled_width) % unpooled_height + pad_h;
int c = (index / unpooled_width / unpooled_height) % channels;
int n = index / unpooled_width / unpooled_height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, width);
Dtype distval = 0;
bottom_data += (n * channels + c) * height * width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, unpooled_height + pad_h);
int wend = min(wstart + kernel_w, unpooled_width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
distval += bottom_data[ph * width + pw] / pool_size;
}
}
top_data[index] = distval;
}
}
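// TileUnpoolForward is identical to AveUnpoolForward except that contributions
// are summed without dividing by the window size (pool_size is computed but unused).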
template <typename Dtype>
__global__ void TileUnpoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int unpooled_height,
const int unpooled_width, const int height, const int width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % unpooled_width + pad_w;
int h = (index / unpooled_width) % unpooled_height + pad_h;
int c = (index / unpooled_width / unpooled_height) % channels;
int n = index / unpooled_width / unpooled_height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, width);
Dtype distval = 0;
bottom_data += (n * channels + c) * height * width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, unpooled_height + pad_h);
int wend = min(wstart + kernel_w, unpooled_width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
distval += bottom_data[ph * width + pw];
}
}
top_data[index] = distval;
}
}
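// Forward_gpu dispatches on the configured unpooling method: MAX scatters values
// (optionally guided by the mask in bottom[1]), AVE distributes averaged values,
// and TILE writes each pooled value to every position of its window (summing overlaps).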
template <typename Dtype>
void UnpoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
int count = bottom[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0.), top_data);
// We'll get the mask from bottom[1] if it's of size >1.
const bool use_bottom_mask = bottom.size() > 1;
const Dtype* bottom_mask = NULL;
switch (this->layer_param_.unpooling_param().unpool()) {
case UnpoolingParameter_UnpoolMethod_MAX:
if (use_bottom_mask) {
bottom_mask = bottom[1]->gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, unpooled_height_, unpooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
bottom_mask);
break;
case UnpoolingParameter_UnpoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AveUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->count(), bottom_data, bottom[0]->num(), channels_,
unpooled_height_, unpooled_width_, height_, width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case UnpoolingParameter_UnpoolMethod_TILE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TileUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->count(), bottom_data, bottom[0]->num(), channels_,
unpooled_height_, unpooled_width_, height_, width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
default:
LOG(FATAL) << "Unknown unpooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
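// MaxUnpoolBackward: each pooled element gathers its gradient from the unpooled position it wrote to (the mask index if provided, otherwise the window origin).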
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff,
const Dtype* bottom_mask, const int num, const int channels,
const int height, const int width, const int unpooled_height,
const int unpooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int pw = index % width;
int ph = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1));
int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1));
int unpooled_index = uph * unpooled_width + upw;
top_diff += (n * channels + c) * unpooled_height * unpooled_width;
if (bottom_mask) {
const int mask_index = bottom_mask[index];
bottom_diff[index] = top_diff[mask_index];
} else {
bottom_diff[index] = top_diff[unpooled_index];
}
}
}
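// AveUnpoolBackward: sums the gradients over this element's unpooling window and divides by the window size.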
template <typename Dtype>
__global__ void AveUnpoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int unpooled_height,
const int unpooled_width, const int height, const int width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % width;
int ph = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, unpooled_height + pad_h);
int wend = min(wstart + kernel_w, unpooled_width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, unpooled_height);
wend = min(wend, unpooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * unpooled_height * unpooled_width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
gradient += top_diff[h * unpooled_width + w];
}
}
bottom_diff[index] = gradient / pool_size;
}
}
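// TileUnpoolBackward: averages the gradients over this element's unpooling window (same reduction as the AVE backward pass).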
template <typename Dtype>
__global__ void TileUnpoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int unpooled_height,
const int unpooled_width, const int height, const int width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % width;
int ph = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, unpooled_height + pad_h);
int wend = min(wstart + kernel_w, unpooled_width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, unpooled_height);
wend = min(wend, unpooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * unpooled_height * unpooled_width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
gradient += top_diff[h * unpooled_width + w];
}
}
bottom_diff[index] = gradient / pool_size;
}
}
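// Backward_gpu: zeroes bottom_diff, then dispatches to the backward kernel matching the unpooling method.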
template <typename Dtype>
void UnpoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// Use the unpooling mask from bottom[1] if a second bottom blob is provided.
const bool use_bottom_mask = bottom.size() > 1;
const Dtype* bottom_mask = NULL;
switch (this->layer_param_.unpooling_param().unpool()) {
case UnpoolingParameter_UnpoolMethod_MAX:
if (use_bottom_mask) {
bottom_mask = bottom[1]->gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_mask, top[0]->num(), channels_,
height_, width_, unpooled_height_, unpooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case UnpoolingParameter_UnpoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AveUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(), top_diff, top[0]->num(), channels_,
unpooled_height_, unpooled_width_, height_, width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case UnpoolingParameter_UnpoolMethod_TILE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TileUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(), top_diff, top[0]->num(), channels_,
unpooled_height_, unpooled_width_, height_, width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown unpooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(UnpoolingLayer);
} // namespace caffe
| 93e0ce5e6e52a0edbb5938105dcadcb9b5a281a8.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/unpooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
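// MaxUnpoolForward: scatters each pooled value into the unpooled output at the stored mask index (or at the window origin when no mask is given).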
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int unpooled_height, const int unpooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
const Dtype* bottom_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % width;
int ph = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1));
int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1));
int unpooled_index = uph * unpooled_width + upw;
top_data += (n * channels + c) * unpooled_height * unpooled_width;
if (bottom_mask) {
const int mask_index = bottom_mask[index];
top_data[mask_index] = bottom_data[index];
} else {
top_data[unpooled_index] = bottom_data[index];
}
}
}
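// AveUnpoolForward: spreads each pooled value uniformly over its unpooling window (each contribution is divided by the window size).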
template <typename Dtype>
__global__ void AveUnpoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int unpooled_height,
const int unpooled_width, const int height, const int width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % unpooled_width + pad_w;
int h = (index / unpooled_width) % unpooled_height + pad_h;
int c = (index / unpooled_width / unpooled_height) % channels;
int n = index / unpooled_width / unpooled_height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, width);
Dtype distval = 0;
bottom_data += (n * channels + c) * height * width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, unpooled_height + pad_h);
int wend = min(wstart + kernel_w, unpooled_width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
distval += bottom_data[ph * width + pw] / pool_size;
}
}
top_data[index] = distval;
}
}
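// TileUnpoolForward: copies each pooled value to every position of its unpooling window (pool_size is computed but not used).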
template <typename Dtype>
__global__ void TileUnpoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int unpooled_height,
const int unpooled_width, const int height, const int width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % unpooled_width + pad_w;
int h = (index / unpooled_width) % unpooled_height + pad_h;
int c = (index / unpooled_width / unpooled_height) % channels;
int n = index / unpooled_width / unpooled_height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, width);
Dtype distval = 0;
bottom_data += (n * channels + c) * height * width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, unpooled_height + pad_h);
int wend = min(wstart + kernel_w, unpooled_width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
distval += bottom_data[ph * width + pw];
}
}
top_data[index] = distval;
}
}
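// Forward_gpu: zeroes the top blob, then dispatches to the MAX, AVE, or TILE unpooling kernel selected by unpooling_param().unpool().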
template <typename Dtype>
void UnpoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
int count = bottom[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0.), top_data);
// Use the unpooling mask from bottom[1] if a second bottom blob is provided.
const bool use_bottom_mask = bottom.size() > 1;
const Dtype* bottom_mask = NULL;
switch (this->layer_param_.unpooling_param().unpool()) {
case UnpoolingParameter_UnpoolMethod_MAX:
if (use_bottom_mask) {
bottom_mask = bottom[1]->gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, unpooled_height_, unpooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
bottom_mask);
break;
case UnpoolingParameter_UnpoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AveUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->count(), bottom_data, bottom[0]->num(), channels_,
unpooled_height_, unpooled_width_, height_, width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case UnpoolingParameter_UnpoolMethod_TILE:
// NOLINT_NEXT_LINE(whitespace/operators)
TileUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->count(), bottom_data, bottom[0]->num(), channels_,
unpooled_height_, unpooled_width_, height_, width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
default:
LOG(FATAL) << "Unknown unpooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
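// MaxUnpoolBackward: each pooled element gathers its gradient from the unpooled position it wrote to (the mask index if provided, otherwise the window origin).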
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff,
const Dtype* bottom_mask, const int num, const int channels,
const int height, const int width, const int unpooled_height,
const int unpooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int pw = index % width;
int ph = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1));
int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1));
int unpooled_index = uph * unpooled_width + upw;
top_diff += (n * channels + c) * unpooled_height * unpooled_width;
if (bottom_mask) {
const int mask_index = bottom_mask[index];
bottom_diff[index] = top_diff[mask_index];
} else {
bottom_diff[index] = top_diff[unpooled_index];
}
}
}
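// AveUnpoolBackward: sums the gradients over this element's unpooling window and divides by the window size.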
template <typename Dtype>
__global__ void AveUnpoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int unpooled_height,
const int unpooled_width, const int height, const int width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % width;
int ph = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, unpooled_height + pad_h);
int wend = min(wstart + kernel_w, unpooled_width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, unpooled_height);
wend = min(wend, unpooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * unpooled_height * unpooled_width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
gradient += top_diff[h * unpooled_width + w];
}
}
bottom_diff[index] = gradient / pool_size;
}
}
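// TileUnpoolBackward: averages the gradients over this element's unpooling window (same reduction as the AVE backward pass).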
template <typename Dtype>
__global__ void TileUnpoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int unpooled_height,
const int unpooled_width, const int height, const int width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % width;
int ph = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, unpooled_height + pad_h);
int wend = min(wstart + kernel_w, unpooled_width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, unpooled_height);
wend = min(wend, unpooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * unpooled_height * unpooled_width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
gradient += top_diff[h * unpooled_width + w];
}
}
bottom_diff[index] = gradient / pool_size;
}
}
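// Backward_gpu: zeroes bottom_diff, then dispatches to the backward kernel matching the unpooling method.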
template <typename Dtype>
void UnpoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// Use the unpooling mask from bottom[1] if a second bottom blob is provided.
const bool use_bottom_mask = bottom.size() > 1;
const Dtype* bottom_mask = NULL;
switch (this->layer_param_.unpooling_param().unpool()) {
case UnpoolingParameter_UnpoolMethod_MAX:
if (use_bottom_mask) {
bottom_mask = bottom[1]->gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_mask, top[0]->num(), channels_,
height_, width_, unpooled_height_, unpooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case UnpoolingParameter_UnpoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AveUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->count(), top_diff, top[0]->num(), channels_,
unpooled_height_, unpooled_width_, height_, width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case UnpoolingParameter_UnpoolMethod_TILE:
// NOLINT_NEXT_LINE(whitespace/operators)
TileUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->count(), top_diff, top[0]->num(), channels_,
unpooled_height_, unpooled_width_, height_, width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown unpooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(UnpoolingLayer);
} // namespace caffe
|
91fcda51665dfd8a6359e024e1dbea6adf380016.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_testkernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
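// Auto-generated benchmark harness: for each matrix size and block shape it pads the grid to a whole number of blocks, warms up the kernel, then times 1000 launches of vec_testkernel.
// Note: the timed loop is not followed by a device synchronize, so the measurement largely reflects launch overhead rather than kernel execution time.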
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(double));  // allocate space for doubles, not just the element count
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(double));
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((vec_testkernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((vec_testkernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((vec_testkernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 91fcda51665dfd8a6359e024e1dbea6adf380016.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_testkernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
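// Auto-generated benchmark harness: for each matrix size and block shape it pads the grid to a whole number of blocks, warms up the kernel, then times 1000 launches of vec_testkernel.
// Note: the timed loop is not followed by a device synchronize, so the measurement largely reflects launch overhead rather than kernel execution time.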
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(double));  // allocate space for doubles, not just the element count
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(double));
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_testkernel<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_testkernel<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_testkernel<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |